lole25 committed

Commit 0e8bad8 · verified · parent 4d9d40e

Model save
README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: DUAL-GPO/phi-2-gpo-new-i0
+ model-index:
+ - name: phi-2-gpo-v20-i1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # phi-2-gpo-v20-i1
+
+ This model is a fine-tuned version of [DUAL-GPO/phi-2-gpo-new-i0](https://huggingface.co/DUAL-GPO/phi-2-gpo-new-i0) on an unspecified dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a configuration sketch follows this list):
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
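For readers who want to reproduce a comparable run, the list above maps roughly onto a `transformers.TrainingArguments` object as sketched below. This is a minimal sketch, assuming the standard `Trainer`/`DPOTrainer` stack implied by the `trl`/`dpo` tags; the `output_dir` name is hypothetical, and DPO-specific settings (e.g. the preference-loss beta, prompt/response lengths) are not recorded in this card.

```python
# Hedged sketch of a TrainingArguments object matching the listed hyperparameters.
# Not the authors' training script; output_dir is hypothetical.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="phi-2-gpo-v20-i1",      # hypothetical local path
    learning_rate=5e-6,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,       # the card reports a total train batch size of 16
    seed=42,
    adam_beta1=0.9,                      # Adam betas/epsilon as listed (these are also the defaults)
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=1,
)
```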
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
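Because the repository ships a PEFT adapter (`adapter_model.safetensors`) rather than full weights, inference typically loads the base model first and attaches the adapter. The sketch below assumes the adapter lives at the Hub id `DUAL-GPO/phi-2-gpo-v20-i1` (inferred from the model name above, not confirmed by this commit), and that the base repo is loadable with `AutoModelForCausalLM`; depending on how that repo is configured, `trust_remote_code=True` may be required.

```python
# Hedged sketch: load the base model and attach this adapter for generation.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "DUAL-GPO/phi-2-gpo-new-i0"     # base model from the card metadata
adapter_id = "DUAL-GPO/phi-2-gpo-v20-i1"  # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches adapter_model.safetensors

inputs = tokenizer("Explain DPO fine-tuning in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```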
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:156c027a5aef2ee9afc5576c7f2ab1f8a5ca085979ec5bd00c2d495021ca8cb6
+ oid sha256:1427b2200d950b3c6817d647ae52086b1d13da4be617ca1f7da52d6446e39f72
  size 167807296
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.3152459278702736,
+     "train_runtime": 5630.9992,
+     "train_samples": 9600,
+     "train_samples_per_second": 1.705,
+     "train_steps_per_second": 0.107
+ }
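The aggregates above are mutually consistent: 9600 samples over roughly 5631 s gives about 1.705 samples/s, and with the total train batch size of 16 listed in the card that is 600 optimizer steps, matching `global_step`/`max_steps` in `trainer_state.json` below. A quick check, using only the values in this file:

```python
# Sanity check of the reported throughput figures (values copied from all_results.json).
train_samples = 9600
train_runtime_s = 5630.9992
total_train_batch_size = 16  # from the hyperparameter list above

steps = train_samples // total_train_batch_size
print(steps)                                       # 600
print(round(train_samples / train_runtime_s, 3))   # ~1.705 samples per second
print(round(steps / train_runtime_s, 3))           # ~0.107 steps per second
```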
runs/May15_01-01-24_gpu4-119-5/events.out.tfevents.1715698968.gpu4-119-5.2148799.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f04176c536b1aa7b55aa366c820b0eec9f1a7c8fccf41942dbfe41ef80dbd01c
- size 37018
+ oid sha256:438278cee20ac92fc6ef15337a15b342e3fb7e6993ab9a8052c9976c782aa310
+ size 43712
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.3152459278702736,
+     "train_runtime": 5630.9992,
+     "train_samples": 9600,
+     "train_samples_per_second": 1.705,
+     "train_steps_per_second": 0.107
+ }
trainer_state.json ADDED
@@ -0,0 +1,884 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 8.333333333333334e-08,
+       "logits/chosen": 0.1956433206796646,
+       "logits/rejected": 0.2183472067117691,
+       "logps/chosen": -380.8453369140625,
+       "logps/rejected": -356.37103271484375,
+       "loss": 0.409,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 8.333333333333333e-07,
+       "logits/chosen": 0.09696108102798462,
+       "logits/rejected": 0.21700751781463623,
+       "logps/chosen": -387.7613525390625,
+       "logps/rejected": -328.28912353515625,
+       "loss": 0.3709,
+       "rewards/accuracies": 0.4236111044883728,
+       "rewards/chosen": 2.4463388399453834e-05,
+       "rewards/margins": 3.930643651983701e-05,
+       "rewards/rejected": -1.4843050848867279e-05,
+       "step": 10
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.6666666666666667e-06,
+       "logits/chosen": 0.13495060801506042,
+       "logits/rejected": 0.22846023738384247,
+       "logps/chosen": -338.4651184082031,
+       "logps/rejected": -336.63897705078125,
+       "loss": 0.3742,
+       "rewards/accuracies": 0.4312500059604645,
+       "rewards/chosen": 6.384013977367431e-05,
+       "rewards/margins": -0.00011528225149959326,
+       "rewards/rejected": 0.00017912239127326757,
+       "step": 20
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 2.5e-06,
+       "logits/chosen": 0.1001209244132042,
+       "logits/rejected": 0.2776273190975189,
+       "logps/chosen": -342.38140869140625,
+       "logps/rejected": -354.812744140625,
+       "loss": 0.3626,
+       "rewards/accuracies": 0.4312500059604645,
+       "rewards/chosen": 3.7229918234515935e-05,
+       "rewards/margins": 9.480830340180546e-05,
+       "rewards/rejected": -5.757838880526833e-05,
+       "step": 30
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 3.3333333333333333e-06,
+       "logits/chosen": 0.12598159909248352,
+       "logits/rejected": 0.21262428164482117,
+       "logps/chosen": -343.973876953125,
+       "logps/rejected": -333.87969970703125,
+       "loss": 0.3845,
+       "rewards/accuracies": 0.5687500238418579,
+       "rewards/chosen": 9.938174480339512e-05,
+       "rewards/margins": 0.00036102396552450955,
+       "rewards/rejected": -0.0002616422134451568,
+       "step": 40
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 4.166666666666667e-06,
+       "logits/chosen": 0.1064462661743164,
+       "logits/rejected": 0.1775200366973877,
+       "logps/chosen": -351.95745849609375,
+       "logps/rejected": -328.10955810546875,
+       "loss": 0.3802,
+       "rewards/accuracies": 0.543749988079071,
+       "rewards/chosen": 0.00037883114418946207,
+       "rewards/margins": 0.0006485713529400527,
+       "rewards/rejected": -0.0002697401796467602,
+       "step": 50
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 5e-06,
+       "logits/chosen": 0.10294230282306671,
+       "logits/rejected": 0.15821890532970428,
+       "logps/chosen": -332.59686279296875,
+       "logps/rejected": -317.3880310058594,
+       "loss": 0.3864,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": 0.0005501825944520533,
+       "rewards/margins": 0.0013122591190040112,
+       "rewards/rejected": -0.0007620764663442969,
+       "step": 60
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 4.995770395678171e-06,
+       "logits/chosen": 0.12952658534049988,
+       "logits/rejected": 0.21082790195941925,
+       "logps/chosen": -337.88616943359375,
+       "logps/rejected": -337.0188903808594,
+       "loss": 0.3761,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": 0.0009980153990909457,
+       "rewards/margins": 0.002471885411068797,
+       "rewards/rejected": -0.0014738701283931732,
+       "step": 70
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 4.983095894354858e-06,
+       "logits/chosen": 0.09408712387084961,
+       "logits/rejected": 0.1505356878042221,
+       "logps/chosen": -326.36724853515625,
+       "logps/rejected": -358.09759521484375,
+       "loss": 0.3757,
+       "rewards/accuracies": 0.668749988079071,
+       "rewards/chosen": 0.0015949418302625418,
+       "rewards/margins": 0.004127982072532177,
+       "rewards/rejected": -0.0025330407079309225,
+       "step": 80
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 4.962019382530521e-06,
+       "logits/chosen": 0.11112242937088013,
+       "logits/rejected": 0.1587546467781067,
+       "logps/chosen": -337.84051513671875,
+       "logps/rejected": -348.18634033203125,
+       "loss": 0.3735,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": 0.0002811970771290362,
+       "rewards/margins": 0.005385980941355228,
+       "rewards/rejected": -0.0051047843880951405,
+       "step": 90
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 4.93261217644956e-06,
+       "logits/chosen": 0.13481362164020538,
+       "logits/rejected": 0.19625934958457947,
+       "logps/chosen": -347.8192443847656,
+       "logps/rejected": -345.80511474609375,
+       "loss": 0.3628,
+       "rewards/accuracies": 0.637499988079071,
+       "rewards/chosen": -0.0003084948402829468,
+       "rewards/margins": 0.01107403077185154,
+       "rewards/rejected": -0.01138252578675747,
+       "step": 100
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 4.894973780788722e-06,
+       "logits/chosen": 0.10719671100378036,
+       "logits/rejected": 0.2091417759656906,
+       "logps/chosen": -347.8533630371094,
+       "logps/rejected": -352.93115234375,
+       "loss": 0.3404,
+       "rewards/accuracies": 0.6875,
+       "rewards/chosen": -0.0032309088855981827,
+       "rewards/margins": 0.021253572776913643,
+       "rewards/rejected": -0.024484481662511826,
+       "step": 110
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 4.849231551964771e-06,
+       "logits/chosen": 0.13324591517448425,
+       "logits/rejected": 0.1466611921787262,
+       "logps/chosen": -377.8055114746094,
+       "logps/rejected": -370.2322082519531,
+       "loss": 0.3684,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": -0.022458815947175026,
+       "rewards/margins": 0.015764199197292328,
+       "rewards/rejected": -0.0382230170071125,
+       "step": 120
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 4.7955402672006855e-06,
+       "logits/chosen": 0.11208464950323105,
+       "logits/rejected": 0.21028931438922882,
+       "logps/chosen": -390.74896240234375,
+       "logps/rejected": -424.40826416015625,
+       "loss": 0.3223,
+       "rewards/accuracies": 0.7437499761581421,
+       "rewards/chosen": -0.043638426810503006,
+       "rewards/margins": 0.047044601291418076,
+       "rewards/rejected": -0.09068302810192108,
+       "step": 130
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 4.734081600808531e-06,
+       "logits/chosen": 0.16790533065795898,
+       "logits/rejected": 0.20132648944854736,
+       "logps/chosen": -450.4337463378906,
+       "logps/rejected": -493.0623474121094,
+       "loss": 0.3234,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.11827051639556885,
+       "rewards/margins": 0.043106190860271454,
+       "rewards/rejected": -0.1613767296075821,
+       "step": 140
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 4.665063509461098e-06,
+       "logits/chosen": 0.1597878634929657,
+       "logits/rejected": 0.22755786776542664,
+       "logps/chosen": -539.1220703125,
+       "logps/rejected": -538.5697021484375,
+       "loss": 0.3578,
+       "rewards/accuracies": 0.65625,
+       "rewards/chosen": -0.17835457623004913,
+       "rewards/margins": 0.05051771551370621,
+       "rewards/rejected": -0.22887229919433594,
+       "step": 150
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 4.588719528532342e-06,
+       "logits/chosen": 0.19429577887058258,
+       "logits/rejected": 0.2786257565021515,
+       "logps/chosen": -445.50531005859375,
+       "logps/rejected": -506.572998046875,
+       "loss": 0.3039,
+       "rewards/accuracies": 0.668749988079071,
+       "rewards/chosen": -0.1227174624800682,
+       "rewards/margins": 0.07187845557928085,
+       "rewards/rejected": -0.19459593296051025,
+       "step": 160
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 4.50530798188761e-06,
+       "logits/chosen": 0.15851017832756042,
+       "logits/rejected": 0.28014975786209106,
+       "logps/chosen": -546.3292236328125,
+       "logps/rejected": -604.9276123046875,
+       "loss": 0.32,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.15538683533668518,
+       "rewards/margins": 0.07926145941019058,
+       "rewards/rejected": -0.23464830219745636,
+       "step": 170
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 4.415111107797445e-06,
+       "logits/chosen": 0.16010740399360657,
+       "logits/rejected": 0.2813720405101776,
+       "logps/chosen": -585.3643798828125,
+       "logps/rejected": -665.8712158203125,
+       "loss": 0.3048,
+       "rewards/accuracies": 0.6937500238418579,
+       "rewards/chosen": -0.2374914586544037,
+       "rewards/margins": 0.11958248913288116,
+       "rewards/rejected": -0.35707396268844604,
+       "step": 180
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 4.318434103932622e-06,
+       "logits/chosen": 0.20392921566963196,
+       "logits/rejected": 0.2997474670410156,
+       "logps/chosen": -593.7650146484375,
+       "logps/rejected": -700.028076171875,
+       "loss": 0.3191,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": -0.27146199345588684,
+       "rewards/margins": 0.11621884256601334,
+       "rewards/rejected": -0.3876808285713196,
+       "step": 190
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 4.215604094671835e-06,
+       "logits/chosen": 0.18635989725589752,
+       "logits/rejected": 0.25986188650131226,
+       "logps/chosen": -575.0554809570312,
+       "logps/rejected": -709.1888427734375,
+       "loss": 0.2834,
+       "rewards/accuracies": 0.699999988079071,
+       "rewards/chosen": -0.22922906279563904,
+       "rewards/margins": 0.12650123238563538,
+       "rewards/rejected": -0.3557303547859192,
+       "step": 200
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 4.106969024216348e-06,
+       "logits/chosen": 0.21329033374786377,
+       "logits/rejected": 0.35237234830856323,
+       "logps/chosen": -569.5208129882812,
+       "logps/rejected": -619.1517944335938,
+       "loss": 0.3111,
+       "rewards/accuracies": 0.6499999761581421,
+       "rewards/chosen": -0.1869223415851593,
+       "rewards/margins": 0.08501409739255905,
+       "rewards/rejected": -0.27193647623062134,
+       "step": 210
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 3.992896479256966e-06,
+       "logits/chosen": 0.22448810935020447,
+       "logits/rejected": 0.2712326645851135,
+       "logps/chosen": -574.156982421875,
+       "logps/rejected": -612.8103637695312,
+       "loss": 0.3201,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.1946686953306198,
+       "rewards/margins": 0.07545152306556702,
+       "rewards/rejected": -0.270120233297348,
+       "step": 220
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 3.8737724451770155e-06,
+       "logits/chosen": 0.21266920864582062,
+       "logits/rejected": 0.241852805018425,
+       "logps/chosen": -489.32073974609375,
+       "logps/rejected": -567.0352783203125,
+       "loss": 0.3088,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": -0.18188850581645966,
+       "rewards/margins": 0.09176473319530487,
+       "rewards/rejected": -0.2736532390117645,
+       "step": 230
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 3.7500000000000005e-06,
+       "logits/chosen": 0.21173226833343506,
+       "logits/rejected": 0.27939146757125854,
+       "logps/chosen": -584.0433349609375,
+       "logps/rejected": -665.869873046875,
+       "loss": 0.2928,
+       "rewards/accuracies": 0.668749988079071,
+       "rewards/chosen": -0.22605593502521515,
+       "rewards/margins": 0.10531251132488251,
+       "rewards/rejected": -0.33136844635009766,
+       "step": 240
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 3.621997950501156e-06,
+       "logits/chosen": 0.26176512241363525,
+       "logits/rejected": 0.28036120533943176,
+       "logps/chosen": -581.51318359375,
+       "logps/rejected": -680.9810791015625,
+       "loss": 0.3056,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.2372002899646759,
+       "rewards/margins": 0.08836118876934052,
+       "rewards/rejected": -0.3255614936351776,
+       "step": 250
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 3.4901994150978926e-06,
+       "logits/chosen": 0.18492260575294495,
+       "logits/rejected": 0.3097878098487854,
+       "logps/chosen": -620.8358154296875,
+       "logps/rejected": -666.6085815429688,
+       "loss": 0.3084,
+       "rewards/accuracies": 0.675000011920929,
+       "rewards/chosen": -0.24568219482898712,
+       "rewards/margins": 0.09036718308925629,
+       "rewards/rejected": -0.3360493779182434,
+       "step": 260
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 3.3550503583141726e-06,
+       "logits/chosen": 0.22367191314697266,
+       "logits/rejected": 0.3482346534729004,
+       "logps/chosen": -595.0042114257812,
+       "logps/rejected": -694.3624877929688,
+       "loss": 0.294,
+       "rewards/accuracies": 0.65625,
+       "rewards/chosen": -0.23263101279735565,
+       "rewards/margins": 0.1014803871512413,
+       "rewards/rejected": -0.33411139249801636,
+       "step": 270
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 3.217008081777726e-06,
+       "logits/chosen": 0.2801993191242218,
+       "logits/rejected": 0.31503865122795105,
+       "logps/chosen": -591.4317626953125,
+       "logps/rejected": -664.4125366210938,
+       "loss": 0.3276,
+       "rewards/accuracies": 0.65625,
+       "rewards/chosen": -0.24832281470298767,
+       "rewards/margins": 0.0828218162059784,
+       "rewards/rejected": -0.33114463090896606,
+       "step": 280
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 3.0765396768561005e-06,
+       "logits/chosen": 0.23998506367206573,
+       "logits/rejected": 0.24142727255821228,
+       "logps/chosen": -615.881103515625,
+       "logps/rejected": -747.1182250976562,
+       "loss": 0.2889,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.2754364013671875,
+       "rewards/margins": 0.12126360088586807,
+       "rewards/rejected": -0.3966999650001526,
+       "step": 290
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 2.9341204441673267e-06,
+       "logits/chosen": 0.26361778378486633,
+       "logits/rejected": 0.31506967544555664,
+       "logps/chosen": -607.7698974609375,
+       "logps/rejected": -754.2791137695312,
+       "loss": 0.3049,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.2881506085395813,
+       "rewards/margins": 0.12216220051050186,
+       "rewards/rejected": -0.41031280159950256,
+       "step": 300
+     },
+     {
+       "epoch": 0.52,
+       "learning_rate": 2.7902322853130758e-06,
+       "logits/chosen": 0.2686136066913605,
+       "logits/rejected": 0.32013028860092163,
+       "logps/chosen": -627.7980346679688,
+       "logps/rejected": -742.2149047851562,
+       "loss": 0.3474,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": -0.2792850434780121,
+       "rewards/margins": 0.09415403008460999,
+       "rewards/rejected": -0.3734390139579773,
+       "step": 310
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 2.6453620722761897e-06,
+       "logits/chosen": 0.15263831615447998,
+       "logits/rejected": 0.2327421009540558,
+       "logps/chosen": -628.375,
+       "logps/rejected": -704.6365966796875,
+       "loss": 0.2827,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.2568105161190033,
+       "rewards/margins": 0.10418572276830673,
+       "rewards/rejected": -0.360996276140213,
+       "step": 320
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 2.5e-06,
+       "logits/chosen": 0.16394071280956268,
+       "logits/rejected": 0.21737909317016602,
+       "logps/chosen": -613.1915893554688,
+       "logps/rejected": -711.7744750976562,
+       "loss": 0.3134,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.2661852240562439,
+       "rewards/margins": 0.10473362356424332,
+       "rewards/rejected": -0.370918869972229,
+       "step": 330
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 2.3546379277238107e-06,
+       "logits/chosen": 0.21221554279327393,
+       "logits/rejected": 0.24156931042671204,
+       "logps/chosen": -627.9307861328125,
+       "logps/rejected": -728.6505126953125,
+       "loss": 0.3131,
+       "rewards/accuracies": 0.643750011920929,
+       "rewards/chosen": -0.28867658972740173,
+       "rewards/margins": 0.09915556758642197,
+       "rewards/rejected": -0.3878321349620819,
+       "step": 340
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 2.2097677146869242e-06,
+       "logits/chosen": 0.11871862411499023,
+       "logits/rejected": 0.22523121535778046,
+       "logps/chosen": -647.56298828125,
+       "logps/rejected": -767.3802490234375,
+       "loss": 0.2886,
+       "rewards/accuracies": 0.6937500238418579,
+       "rewards/chosen": -0.30263960361480713,
+       "rewards/margins": 0.11332044750452042,
+       "rewards/rejected": -0.41596007347106934,
+       "step": 350
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 2.0658795558326745e-06,
+       "logits/chosen": 0.21589894592761993,
+       "logits/rejected": 0.2351434975862503,
+       "logps/chosen": -660.6466064453125,
+       "logps/rejected": -783.96875,
+       "loss": 0.303,
+       "rewards/accuracies": 0.7124999761581421,
+       "rewards/chosen": -0.3056688904762268,
+       "rewards/margins": 0.12556755542755127,
+       "rewards/rejected": -0.4312364161014557,
+       "step": 360
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 1.9234603231439e-06,
+       "logits/chosen": 0.18540482223033905,
+       "logits/rejected": 0.20940378308296204,
+       "logps/chosen": -629.5787963867188,
+       "logps/rejected": -739.3985595703125,
+       "loss": 0.2899,
+       "rewards/accuracies": 0.65625,
+       "rewards/chosen": -0.30902260541915894,
+       "rewards/margins": 0.10534413158893585,
+       "rewards/rejected": -0.41436678171157837,
+       "step": 370
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.7829919182222752e-06,
+       "logits/chosen": 0.1524067372083664,
+       "logits/rejected": 0.23221668601036072,
+       "logps/chosen": -677.9874877929688,
+       "logps/rejected": -802.863037109375,
+       "loss": 0.2776,
+       "rewards/accuracies": 0.6875,
+       "rewards/chosen": -0.3159080147743225,
+       "rewards/margins": 0.14397332072257996,
+       "rewards/rejected": -0.4598813056945801,
+       "step": 380
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 1.6449496416858285e-06,
+       "logits/chosen": 0.18434450030326843,
+       "logits/rejected": 0.24084284901618958,
+       "logps/chosen": -654.8961181640625,
+       "logps/rejected": -759.6239013671875,
+       "loss": 0.2896,
+       "rewards/accuracies": 0.637499988079071,
+       "rewards/chosen": -0.2936326861381531,
+       "rewards/margins": 0.12481866776943207,
+       "rewards/rejected": -0.41845136880874634,
+       "step": 390
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 1.509800584902108e-06,
+       "logits/chosen": 0.1810900717973709,
+       "logits/rejected": 0.2414156198501587,
+       "logps/chosen": -614.9703979492188,
+       "logps/rejected": -757.6051025390625,
+       "loss": 0.326,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.31767985224723816,
+       "rewards/margins": 0.10633231699466705,
+       "rewards/rejected": -0.4240121841430664,
+       "step": 400
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 1.3780020494988447e-06,
+       "logits/chosen": 0.18032656610012054,
+       "logits/rejected": 0.19579799473285675,
+       "logps/chosen": -655.676513671875,
+       "logps/rejected": -761.1375122070312,
+       "loss": 0.3115,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.3214338421821594,
+       "rewards/margins": 0.09715566784143448,
+       "rewards/rejected": -0.4185895025730133,
+       "step": 410
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 1.2500000000000007e-06,
+       "logits/chosen": 0.15863251686096191,
+       "logits/rejected": 0.2124270498752594,
+       "logps/chosen": -599.48388671875,
+       "logps/rejected": -698.137939453125,
+       "loss": 0.3008,
+       "rewards/accuracies": 0.71875,
+       "rewards/chosen": -0.28964871168136597,
+       "rewards/margins": 0.10956676304340363,
+       "rewards/rejected": -0.3992155194282532,
+       "step": 420
+     },
+     {
+       "epoch": 0.72,
+       "learning_rate": 1.1262275548229852e-06,
+       "logits/chosen": 0.15977849066257477,
+       "logits/rejected": 0.2368887960910797,
+       "logps/chosen": -677.1764526367188,
+       "logps/rejected": -847.1414184570312,
+       "loss": 0.2538,
+       "rewards/accuracies": 0.731249988079071,
+       "rewards/chosen": -0.31619545817375183,
+       "rewards/margins": 0.16232061386108398,
+       "rewards/rejected": -0.478516161441803,
+       "step": 430
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 1.0071035207430352e-06,
+       "logits/chosen": 0.1530456840991974,
+       "logits/rejected": 0.2543787360191345,
+       "logps/chosen": -682.7639770507812,
+       "logps/rejected": -803.2896728515625,
+       "loss": 0.2922,
+       "rewards/accuracies": 0.71875,
+       "rewards/chosen": -0.3221725821495056,
+       "rewards/margins": 0.1354692280292511,
+       "rewards/rejected": -0.4576418399810791,
+       "step": 440
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 8.930309757836517e-07,
+       "logits/chosen": 0.14446057379245758,
+       "logits/rejected": 0.2153489887714386,
+       "logps/chosen": -625.8594970703125,
+       "logps/rejected": -766.3084716796875,
+       "loss": 0.2742,
+       "rewards/accuracies": 0.6875,
+       "rewards/chosen": -0.26791518926620483,
+       "rewards/margins": 0.13798010349273682,
+       "rewards/rejected": -0.40589532256126404,
+       "step": 450
+     },
+     {
+       "epoch": 0.77,
+       "learning_rate": 7.843959053281663e-07,
+       "logits/chosen": 0.19729985296726227,
+       "logits/rejected": 0.2402934730052948,
+       "logps/chosen": -648.9825439453125,
+       "logps/rejected": -759.14501953125,
+       "loss": 0.2909,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.2782381474971771,
+       "rewards/margins": 0.12825573980808258,
+       "rewards/rejected": -0.4064939022064209,
+       "step": 460
+     },
+     {
+       "epoch": 0.78,
+       "learning_rate": 6.815658960673782e-07,
+       "logits/chosen": 0.17204996943473816,
+       "logits/rejected": 0.2728912830352783,
+       "logps/chosen": -618.990966796875,
+       "logps/rejected": -784.3754272460938,
+       "loss": 0.2578,
+       "rewards/accuracies": 0.7749999761581421,
+       "rewards/chosen": -0.2586398720741272,
+       "rewards/margins": 0.15261724591255188,
+       "rewards/rejected": -0.4112570881843567,
+       "step": 470
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 5.848888922025553e-07,
+       "logits/chosen": 0.13143357634544373,
+       "logits/rejected": 0.21312256157398224,
+       "logps/chosen": -622.7134399414062,
+       "logps/rejected": -721.8486328125,
+       "loss": 0.3171,
+       "rewards/accuracies": 0.668749988079071,
+       "rewards/chosen": -0.30107492208480835,
+       "rewards/margins": 0.08757724612951279,
+       "rewards/rejected": -0.38865217566490173,
+       "step": 480
+     },
+     {
+       "epoch": 0.82,
+       "learning_rate": 4.946920181123904e-07,
+       "logits/chosen": 0.13198260962963104,
+       "logits/rejected": 0.17995597422122955,
+       "logps/chosen": -632.0765380859375,
+       "logps/rejected": -736.373779296875,
+       "loss": 0.2857,
+       "rewards/accuracies": 0.737500011920929,
+       "rewards/chosen": -0.28522834181785583,
+       "rewards/margins": 0.11633868515491486,
+       "rewards/rejected": -0.4015669822692871,
+       "step": 490
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 4.1128047146765936e-07,
+       "logits/chosen": 0.1584668904542923,
+       "logits/rejected": 0.249081090092659,
+       "logps/chosen": -613.0228271484375,
+       "logps/rejected": -733.0048828125,
+       "loss": 0.2768,
+       "rewards/accuracies": 0.637499988079071,
+       "rewards/chosen": -0.26978355646133423,
+       "rewards/margins": 0.1299673467874527,
+       "rewards/rejected": -0.39975088834762573,
+       "step": 500
+     },
+     {
+       "epoch": 0.85,
+       "learning_rate": 3.3493649053890325e-07,
+       "logits/chosen": 0.13333144783973694,
+       "logits/rejected": 0.25184234976768494,
+       "logps/chosen": -686.0787963867188,
+       "logps/rejected": -783.8185424804688,
+       "loss": 0.3103,
+       "rewards/accuracies": 0.6937500238418579,
+       "rewards/chosen": -0.3115057349205017,
+       "rewards/margins": 0.11691226065158844,
+       "rewards/rejected": -0.42841801047325134,
+       "step": 510
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 2.6591839919146963e-07,
+       "logits/chosen": 0.1042710691690445,
+       "logits/rejected": 0.19632944464683533,
+       "logps/chosen": -641.197021484375,
+       "logps/rejected": -739.2056884765625,
+       "loss": 0.3089,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": -0.3029845356941223,
+       "rewards/margins": 0.11691125482320786,
+       "rewards/rejected": -0.41989579796791077,
+       "step": 520
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 2.044597327993153e-07,
+       "logits/chosen": 0.13513818383216858,
+       "logits/rejected": 0.2279660403728485,
+       "logps/chosen": -638.3856201171875,
+       "logps/rejected": -733.2174072265625,
+       "loss": 0.3394,
+       "rewards/accuracies": 0.5874999761581421,
+       "rewards/chosen": -0.31149208545684814,
+       "rewards/margins": 0.09216032922267914,
+       "rewards/rejected": -0.4036524295806885,
+       "step": 530
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 1.507684480352292e-07,
+       "logits/chosen": 0.13924403488636017,
+       "logits/rejected": 0.19593167304992676,
+       "logps/chosen": -614.1495971679688,
+       "logps/rejected": -716.1968994140625,
+       "loss": 0.2755,
+       "rewards/accuracies": 0.6812499761581421,
+       "rewards/chosen": -0.2909039855003357,
+       "rewards/margins": 0.11327805370092392,
+       "rewards/rejected": -0.4041820466518402,
+       "step": 540
+     },
+     {
+       "epoch": 0.92,
+       "learning_rate": 1.0502621921127776e-07,
+       "logits/chosen": 0.1402362883090973,
+       "logits/rejected": 0.2016625702381134,
+       "logps/chosen": -635.9324340820312,
+       "logps/rejected": -708.5713500976562,
+       "loss": 0.2907,
+       "rewards/accuracies": 0.6875,
+       "rewards/chosen": -0.30609840154647827,
+       "rewards/margins": 0.10378583520650864,
+       "rewards/rejected": -0.4098842144012451,
+       "step": 550
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 6.738782355044048e-08,
+       "logits/chosen": 0.1220315471291542,
+       "logits/rejected": 0.1830691397190094,
+       "logps/chosen": -661.2696533203125,
+       "logps/rejected": -801.816162109375,
+       "loss": 0.2863,
+       "rewards/accuracies": 0.699999988079071,
+       "rewards/chosen": -0.3084556758403778,
+       "rewards/margins": 0.14121660590171814,
+       "rewards/rejected": -0.44967228174209595,
+       "step": 560
+     },
+     {
+       "epoch": 0.95,
+       "learning_rate": 3.798061746947995e-08,
+       "logits/chosen": 0.12625311315059662,
+       "logits/rejected": 0.19530022144317627,
+       "logps/chosen": -651.5577392578125,
+       "logps/rejected": -755.2259521484375,
+       "loss": 0.284,
+       "rewards/accuracies": 0.706250011920929,
+       "rewards/chosen": -0.2925582528114319,
+       "rewards/margins": 0.12503091990947723,
+       "rewards/rejected": -0.4175891876220703,
+       "step": 570
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 1.6904105645142443e-08,
+       "logits/chosen": 0.16926367580890656,
+       "logits/rejected": 0.21377988159656525,
+       "logps/chosen": -626.6007080078125,
+       "logps/rejected": -761.6741943359375,
+       "loss": 0.2901,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": -0.30488914251327515,
+       "rewards/margins": 0.11805585771799088,
+       "rewards/rejected": -0.4229450225830078,
+       "step": 580
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 4.229604321829561e-09,
+       "logits/chosen": 0.12586399912834167,
+       "logits/rejected": 0.20252175629138947,
+       "logps/chosen": -655.3035278320312,
+       "logps/rejected": -739.29833984375,
+       "loss": 0.3106,
+       "rewards/accuracies": 0.643750011920929,
+       "rewards/chosen": -0.2980835437774658,
+       "rewards/margins": 0.1191561371088028,
+       "rewards/rejected": -0.4172397255897522,
+       "step": 590
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0,
+       "logits/chosen": 0.159867063164711,
+       "logits/rejected": 0.22124703228473663,
+       "logps/chosen": -676.0281982421875,
+       "logps/rejected": -778.9498291015625,
+       "loss": 0.2705,
+       "rewards/accuracies": 0.762499988079071,
+       "rewards/chosen": -0.28896114230155945,
+       "rewards/margins": 0.14466610550880432,
+       "rewards/rejected": -0.43362727761268616,
+       "step": 600
+     },
+     {
+       "epoch": 1.0,
+       "step": 600,
+       "total_flos": 0.0,
+       "train_loss": 0.3152459278702736,
+       "train_runtime": 5630.9992,
+       "train_samples_per_second": 1.705,
+       "train_steps_per_second": 0.107
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 600,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
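The log above is easiest to read programmatically. The sketch below only assumes the `trainer_state.json` structure shown in this commit (a `log_history` list whose per-step records carry `"step"`, `"loss"`, and the DPO reward statistics) and a local copy of the file; the path is hypothetical.

```python
# Hedged sketch: inspect the DPO training log from a downloaded trainer_state.json.
import json

with open("trainer_state.json") as f:          # hypothetical local path
    state = json.load(f)

# Keep the per-step training records; the final entry holds run-level aggregates only.
records = [r for r in state["log_history"] if "loss" in r and "step" in r]

for r in records[:5]:
    print(f'step {r["step"]:>3}  loss {r["loss"]:.4f}  '
          f'reward margin {r.get("rewards/margins", float("nan")):.4f}')
```

Scanning the records this way shows the expected DPO trend for this run: the loss drops from roughly 0.41 to 0.27 and the chosen-vs-rejected reward margin grows from 0 to about 0.14 over the 600 steps.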