lole25 committed
Commit ec33c3e
1 Parent(s): a46f0e7

Model save

README.md CHANGED
@@ -2,13 +2,9 @@
  license: mit
  library_name: peft
  tags:
- - alignment-handbook
- - generated_from_trainer
  - trl
  - dpo
  - generated_from_trainer
- datasets:
- - HuggingFaceH4/ultrafeedback_binarized
  base_model: microsoft/phi-2
  model-index:
  - name: phi-2-gpo-ultrachat-lora-2
@@ -20,17 +16,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # phi-2-gpo-ultrachat-lora-2
 
- This model is a fine-tuned version of [lole25/phi-2-sft-ultrachat-lora](https://huggingface.co/lole25/phi-2-sft-ultrachat-lora) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0100
- - Rewards/chosen: -0.0005
- - Rewards/rejected: -0.0009
- - Rewards/accuracies: 0.2620
- - Rewards/margins: 0.0004
- - Logps/rejected: -94.2882
- - Logps/chosen: -91.7769
- - Logits/rejected: 0.8176
- - Logits/chosen: 0.7994
+ - Loss: 0.0093
+ - Rewards/chosen: -0.0154
+ - Rewards/rejected: -0.0218
+ - Rewards/accuracies: 0.3500
+ - Rewards/margins: 0.0064
+ - Logps/rejected: -96.3794
+ - Logps/chosen: -93.2678
+ - Logits/rejected: 0.7520
+ - Logits/chosen: 0.7332
 
  ## Model description
 
@@ -65,9 +61,17 @@ The following hyperparameters were used during training:
 
  ### Training results
 
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
- |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.01 | 1.04 | 100 | 0.0100 | -0.0004 | -0.0007 | 0.25 | 0.0003 | -94.2623 | -91.7671 | 0.8188 | 0.8011 |
+ | Training Loss | Epoch | Step | Logits/chosen | Logits/rejected | Logps/chosen | Logps/rejected | Validation Loss | Rewards/accuracies | Rewards/chosen | Rewards/margins | Rewards/rejected |
+ |:-------------:|:-----:|:----:|:-------------:|:---------------:|:------------:|:--------------:|:---------------:|:------------------:|:--------------:|:---------------:|:----------------:|
+ | 0.01 | 1.04 | 100 | 0.8011 | 0.8188 | -91.7671 | -94.2623 | 0.0100 | 0.25 | -0.0004 | 0.0003 | -0.0007 |
+ | 0.0098 | 0.42 | 200 | 0.7928 | 0.8107 | -91.9032 | -94.5191 | 0.0098 | 0.3060 | -0.0018 | 0.0015 | -0.0032 |
+ | 0.0095 | 0.63 | 300 | 0.7800 | 0.7982 | -92.3092 | -95.0819 | 0.0096 | 0.3060 | -0.0058 | 0.0030 | -0.0088 |
+ | 0.0091 | 0.84 | 400 | 0.7565 | 0.7753 | -92.8250 | -95.7642 | 0.0094 | 0.3340 | -0.0110 | 0.0047 | -0.0157 |
+ | 0.0094 | 1.05 | 500 | 0.7492 | 0.7679 | -93.0463 | -96.1150 | 0.0093 | 0.3400 | -0.0132 | 0.0060 | -0.0192 |
+ | 0.0093 | 1.26 | 600 | 0.7383 | 0.7578 | -93.1677 | -96.2631 | 0.0093 | 0.3440 | -0.0144 | 0.0063 | -0.0207 |
+ | 0.009 | 1.47 | 700 | 0.7355 | 0.7545 | -93.2491 | -96.3198 | 0.0093 | 0.3480 | -0.0152 | 0.0060 | -0.0212 |
+ | 0.009 | 1.67 | 800 | 0.7328 | 0.7523 | -93.2749 | -96.3791 | 0.0093 | 0.3420 | -0.0155 | 0.0063 | -0.0218 |
+ | 0.0091 | 1.88 | 900 | 0.7320 | 0.7515 | -93.2841 | -96.3809 | 0.0093 | 0.3480 | -0.0156 | 0.0063 | -0.0218 |
 
 
  ### Framework versions
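The card above describes a LoRA adapter (PEFT, `library_name: peft`) trained on top of `microsoft/phi-2` with TRL's DPO trainer. As a rough sketch of how such an adapter is typically loaded for inference with `transformers` and `peft` — note the adapter repo id `lole25/phi-2-gpo-ultrachat-lora-2` is an assumption based on the committer and model name, it is not stated explicitly in this diff:

```python
# Minimal sketch (not taken from this repo): load phi-2 and attach the LoRA adapter via peft.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",            # base_model from the card's metadata
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")

# Assumed adapter repo id; substitute the actual one if it differs.
model = PeftModel.from_pretrained(base, "lole25/phi-2-gpo-ultrachat-lora-2")
model.eval()

prompt = "Explain LoRA fine-tuning in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

If the SFT stage used a chat template, that template would need to be applied to the prompt as well; that detail is not recoverable from this diff.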
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6db1a6a7ed5705eb5d79ae2110519da3acbcf78177d5d93eae7093c563d46628
+ oid sha256:df2ff3799a11a765893c7f36ebf65cc1ce4d76654a140aaa8e916529c6773a44
  size 41977616
all_results.json CHANGED
@@ -1,21 +1,21 @@
  {
- "epoch": 1.98,
- "eval_logits/chosen": 0.7993869781494141,
- "eval_logits/rejected": 0.8175749778747559,
- "eval_logps/chosen": -91.77693176269531,
- "eval_logps/rejected": -94.28819274902344,
- "eval_loss": 0.009953780099749565,
- "eval_rewards/accuracies": 0.2619999945163727,
- "eval_rewards/chosen": -0.0004955825861543417,
- "eval_rewards/margins": 0.00041623329161666334,
- "eval_rewards/rejected": -0.0009118159650824964,
- "eval_runtime": 274.2974,
+ "epoch": 2.0,
+ "eval_logits/chosen": 0.7331738471984863,
+ "eval_logits/rejected": 0.7520135045051575,
+ "eval_logps/chosen": -93.26776885986328,
+ "eval_logps/rejected": -96.37944030761719,
+ "eval_loss": 0.00928194634616375,
+ "eval_rewards/accuracies": 0.3499999940395355,
+ "eval_rewards/chosen": -0.015403981320559978,
+ "eval_rewards/margins": 0.006420300807803869,
+ "eval_rewards/rejected": -0.02182428352534771,
+ "eval_runtime": 272.4398,
  "eval_samples": 2000,
- "eval_samples_per_second": 7.291,
- "eval_steps_per_second": 0.456,
- "train_loss": 0.00995884225458691,
- "train_runtime": 2568.9627,
- "train_samples": 6113,
- "train_samples_per_second": 4.759,
- "train_steps_per_second": 0.074
+ "eval_samples_per_second": 7.341,
+ "eval_steps_per_second": 0.459,
+ "train_loss": 0.008370190804172112,
+ "train_runtime": 12459.0229,
+ "train_samples": 30567,
+ "train_samples_per_second": 4.907,
+ "train_steps_per_second": 0.077
  }
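The updated throughput figures in `all_results.json` are internally consistent: 30567 training samples seen over 2 epochs in roughly 12459 s gives the reported ~4.907 samples/s, and 2000 eval samples in ~272 s gives ~7.34 samples/s. A quick arithmetic check, using only the values above:

```python
# Sanity-check the throughput numbers reported in all_results.json.
train_samples = 30567
num_train_epochs = 2
train_runtime_s = 12459.0229
eval_samples = 2000
eval_runtime_s = 272.4398

print(round(train_samples * num_train_epochs / train_runtime_s, 3))  # 4.907 -> train_samples_per_second
print(round(eval_samples / eval_runtime_s, 3))                       # 7.341 -> eval_samples_per_second
```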
eval_results.json CHANGED
@@ -1,16 +1,16 @@
  {
- "epoch": 1.98,
- "eval_logits/chosen": 0.7993869781494141,
- "eval_logits/rejected": 0.8175749778747559,
- "eval_logps/chosen": -91.77693176269531,
- "eval_logps/rejected": -94.28819274902344,
- "eval_loss": 0.009953780099749565,
- "eval_rewards/accuracies": 0.2619999945163727,
- "eval_rewards/chosen": -0.0004955825861543417,
- "eval_rewards/margins": 0.00041623329161666334,
- "eval_rewards/rejected": -0.0009118159650824964,
- "eval_runtime": 274.2974,
+ "epoch": 2.0,
+ "eval_logits/chosen": 0.7331738471984863,
+ "eval_logits/rejected": 0.7520135045051575,
+ "eval_logps/chosen": -93.26776885986328,
+ "eval_logps/rejected": -96.37944030761719,
+ "eval_loss": 0.00928194634616375,
+ "eval_rewards/accuracies": 0.3499999940395355,
+ "eval_rewards/chosen": -0.015403981320559978,
+ "eval_rewards/margins": 0.006420300807803869,
+ "eval_rewards/rejected": -0.02182428352534771,
+ "eval_runtime": 272.4398,
  "eval_samples": 2000,
- "eval_samples_per_second": 7.291,
- "eval_steps_per_second": 0.456
+ "eval_samples_per_second": 7.341,
+ "eval_steps_per_second": 0.459
  }
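A note on reading the DPO metrics in `eval_results.json`: in TRL's DPO implementation the logged reward margin is simply the chosen reward minus the rejected reward (each being a β-scaled log-probability ratio of the policy against the reference model), so the three reward figures above agree with each other up to floating-point rounding:

$$
\text{eval\_rewards/margins} \;=\; \text{eval\_rewards/chosen} - \text{eval\_rewards/rejected}
\;\approx\; -0.01540 - (-0.02182) \;=\; 0.00642
$$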
runs/Mar01_00-18-23_gpu4-119-4/events.out.tfevents.1709212862.gpu4-119-4.1402568.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8d930d6166f7b630c63f1326fd366f10379fe5bb72aba39068d362f4621b9eb0
- size 61452
+ oid sha256:e93387dd7351f25e4bee2b7f566d1118f4374ae2d039ce989f37eec0f7f5165c
+ size 64976
runs/Mar01_00-18-23_gpu4-119-4/events.out.tfevents.1709225593.gpu4-119-4.1402568.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06232da9ee6e82413cdcf198440cf3a6da0fd688540a1ef72b1e4c08dc9156e4
+ size 828
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 1.98,
- "train_loss": 0.00995884225458691,
- "train_runtime": 2568.9627,
- "train_samples": 6113,
- "train_samples_per_second": 4.759,
- "train_steps_per_second": 0.074
+ "epoch": 2.0,
+ "train_loss": 0.008370190804172112,
+ "train_runtime": 12459.0229,
+ "train_samples": 30567,
+ "train_samples_per_second": 4.907,
+ "train_steps_per_second": 0.077
  }
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.9843342036553526,
5
  "eval_steps": 100,
6
- "global_step": 190,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -179,143 +179,1335 @@
179
  "step": 100
180
  },
181
  {
182
- "epoch": 1.15,
183
- "learning_rate": 2.2478162071993296e-06,
184
- "logits/chosen": 0.8606807589530945,
185
- "logits/rejected": 0.9067083597183228,
186
- "logps/chosen": -103.346435546875,
187
- "logps/rejected": -102.74019622802734,
188
  "loss": 0.0099,
189
- "rewards/accuracies": 0.3125,
190
- "rewards/chosen": 0.0002891735057346523,
191
- "rewards/margins": 0.0007119966903701425,
192
- "rewards/rejected": -0.00042282306822016835,
193
  "step": 110
194
  },
195
  {
196
- "epoch": 1.25,
197
- "learning_rate": 1.797672000566077e-06,
198
- "logits/chosen": 0.8887661099433899,
199
- "logits/rejected": 0.8524330258369446,
200
- "logps/chosen": -92.60103607177734,
201
- "logps/rejected": -70.87162780761719,
202
- "loss": 0.0099,
203
  "rewards/accuracies": 0.2874999940395355,
204
- "rewards/chosen": 0.00012959113519173115,
205
- "rewards/margins": 0.0010213626082986593,
206
- "rewards/rejected": -0.0008917713421396911,
207
  "step": 120
208
  },
209
  {
210
- "epoch": 1.36,
211
- "learning_rate": 1.3711666042227772e-06,
212
- "logits/chosen": 0.8812958598136902,
213
- "logits/rejected": 0.9191433191299438,
214
- "logps/chosen": -99.47935485839844,
215
- "logps/rejected": -102.90433502197266,
216
  "loss": 0.0099,
217
- "rewards/accuracies": 0.33125001192092896,
218
- "rewards/chosen": -0.00027921958826482296,
219
- "rewards/margins": 0.00039964643656276166,
220
- "rewards/rejected": -0.0006788660539314151,
221
  "step": 130
222
  },
223
  {
224
- "epoch": 1.46,
225
- "learning_rate": 9.826552484321086e-07,
226
- "logits/chosen": 0.8100296258926392,
227
- "logits/rejected": 0.8721168637275696,
228
- "logps/chosen": -98.98599243164062,
229
- "logps/rejected": -95.76078033447266,
230
- "loss": 0.0099,
231
- "rewards/accuracies": 0.29374998807907104,
232
- "rewards/chosen": -0.0003388571203686297,
233
- "rewards/margins": 0.0009376562084071338,
234
- "rewards/rejected": -0.0012765133287757635,
235
  "step": 140
236
  },
237
  {
238
- "epoch": 1.57,
239
- "learning_rate": 6.452143679117965e-07,
240
- "logits/chosen": 0.8696478009223938,
241
- "logits/rejected": 0.884810745716095,
242
- "logps/chosen": -74.74392700195312,
243
- "logps/rejected": -80.85291290283203,
244
  "loss": 0.0099,
245
- "rewards/accuracies": 0.2874999940395355,
246
- "rewards/chosen": -3.058182119275443e-05,
247
- "rewards/margins": 0.0010159575613215566,
248
- "rewards/rejected": -0.0010465392842888832,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 1.67,
253
- "learning_rate": 3.7020147790418266e-07,
254
- "logits/chosen": 0.8866029977798462,
255
- "logits/rejected": 0.8834725618362427,
256
- "logps/chosen": -105.7184066772461,
257
- "logps/rejected": -99.87519836425781,
258
  "loss": 0.0099,
259
- "rewards/accuracies": 0.28125,
260
- "rewards/chosen": -0.0007261586142703891,
261
- "rewards/margins": 2.431169559713453e-05,
262
- "rewards/rejected": -0.000750470208004117,
263
  "step": 160
264
  },
265
  {
266
- "epoch": 1.78,
267
- "learning_rate": 1.6687290528135725e-07,
268
- "logits/chosen": 0.7714171409606934,
269
- "logits/rejected": 0.8409671783447266,
270
- "logps/chosen": -102.75898742675781,
271
- "logps/rejected": -96.38053894042969,
272
  "loss": 0.0099,
273
- "rewards/accuracies": 0.3375000059604645,
274
- "rewards/chosen": -0.0006845382740721107,
275
- "rewards/margins": 0.00033002972486428916,
276
- "rewards/rejected": -0.0010145680280402303,
277
  "step": 170
278
  },
279
  {
280
- "epoch": 1.88,
281
- "learning_rate": 4.207224101311247e-08,
282
- "logits/chosen": 0.8822728395462036,
283
- "logits/rejected": 0.935335636138916,
284
- "logps/chosen": -119.57994079589844,
285
- "logps/rejected": -112.05470275878906,
286
  "loss": 0.0099,
287
- "rewards/accuracies": 0.3125,
288
- "rewards/chosen": -0.0007582681137137115,
289
- "rewards/margins": 0.0003905483172275126,
290
- "rewards/rejected": -0.001148816430941224,
291
  "step": 180
292
  },
293
  {
294
- "epoch": 1.98,
295
- "learning_rate": 0.0,
296
- "logits/chosen": 0.8246952295303345,
297
- "logits/rejected": 0.8706514239311218,
298
- "logps/chosen": -73.35179138183594,
299
- "logps/rejected": -76.91303253173828,
300
- "loss": 0.0099,
301
- "rewards/accuracies": 0.25,
302
- "rewards/chosen": -0.0005546126631088555,
303
- "rewards/margins": 0.0004530520236585289,
304
- "rewards/rejected": -0.0010076647158712149,
305
  "step": 190
306
  },
307
  {
308
- "epoch": 1.98,
309
- "step": 190,
310
  "total_flos": 0.0,
311
- "train_loss": 0.00995884225458691,
312
- "train_runtime": 2568.9627,
313
- "train_samples_per_second": 4.759,
314
- "train_steps_per_second": 0.074
315
  }
316
  ],
317
  "logging_steps": 10,
318
- "max_steps": 190,
319
  "num_input_tokens_seen": 0,
320
  "num_train_epochs": 2,
321
  "save_steps": 100,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 1.9968602825745683,
5
  "eval_steps": 100,
6
+ "global_step": 954,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
179
  "step": 100
180
  },
181
  {
182
+ "epoch": 0.23,
183
+ "learning_rate": 4.996716052911017e-06,
184
+ "logits/chosen": 0.807642936706543,
185
+ "logits/rejected": 0.8303602337837219,
186
+ "logps/chosen": -110.20500183105469,
187
+ "logps/rejected": -96.52973937988281,
188
  "loss": 0.0099,
189
+ "rewards/accuracies": 0.33125001192092896,
190
+ "rewards/chosen": -0.0005453795311041176,
191
+ "rewards/margins": 0.0005892693297937512,
192
+ "rewards/rejected": -0.0011346489191055298,
193
  "step": 110
194
  },
195
  {
196
+ "epoch": 0.25,
197
+ "learning_rate": 4.9903533134293035e-06,
198
+ "logits/chosen": 0.8457789421081543,
199
+ "logits/rejected": 0.9221089482307434,
200
+ "logps/chosen": -84.42936706542969,
201
+ "logps/rejected": -80.35577392578125,
202
+ "loss": 0.01,
203
  "rewards/accuracies": 0.2874999940395355,
204
+ "rewards/chosen": -0.0005200408631935716,
205
+ "rewards/margins": -2.1466799807967618e-05,
206
+ "rewards/rejected": -0.0004985741106793284,
207
  "step": 120
208
  },
209
  {
210
+ "epoch": 0.27,
211
+ "learning_rate": 4.9806521797692184e-06,
212
+ "logits/chosen": 0.7757605910301208,
213
+ "logits/rejected": 0.8338298797607422,
214
+ "logps/chosen": -91.54916381835938,
215
+ "logps/rejected": -80.19969177246094,
216
  "loss": 0.0099,
217
+ "rewards/accuracies": 0.28125,
218
+ "rewards/chosen": -0.00031306734308600426,
219
+ "rewards/margins": 0.0007860729238018394,
220
+ "rewards/rejected": -0.0010991402668878436,
221
  "step": 130
222
  },
223
  {
224
+ "epoch": 0.29,
225
+ "learning_rate": 4.967625656594782e-06,
226
+ "logits/chosen": 0.8086369633674622,
227
+ "logits/rejected": 0.8897687792778015,
228
+ "logps/chosen": -102.42750549316406,
229
+ "logps/rejected": -121.26502990722656,
230
+ "loss": 0.01,
231
+ "rewards/accuracies": 0.2874999940395355,
232
+ "rewards/chosen": -0.0017349247355014086,
233
+ "rewards/margins": -0.0009382988209836185,
234
+ "rewards/rejected": -0.0007966257398948073,
235
  "step": 140
236
  },
237
  {
238
+ "epoch": 0.31,
239
+ "learning_rate": 4.95129120635556e-06,
240
+ "logits/chosen": 0.8270760774612427,
241
+ "logits/rejected": 0.8866473436355591,
242
+ "logps/chosen": -98.09243774414062,
243
+ "logps/rejected": -75.63480377197266,
244
  "loss": 0.0099,
245
+ "rewards/accuracies": 0.29374998807907104,
246
+ "rewards/chosen": -0.0008643465116620064,
247
+ "rewards/margins": 0.0005676061264239252,
248
+ "rewards/rejected": -0.0014319528127089143,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.33,
253
+ "learning_rate": 4.93167072587771e-06,
254
+ "logits/chosen": 0.7157760858535767,
255
+ "logits/rejected": 0.7634655237197876,
256
+ "logps/chosen": -80.0224609375,
257
+ "logps/rejected": -73.7667236328125,
258
  "loss": 0.0099,
259
+ "rewards/accuracies": 0.26875001192092896,
260
+ "rewards/chosen": -0.0008890128810890019,
261
+ "rewards/margins": 0.0006957318400964141,
262
+ "rewards/rejected": -0.001584744779393077,
263
  "step": 160
264
  },
265
  {
266
+ "epoch": 0.36,
267
+ "learning_rate": 4.908790517010637e-06,
268
+ "logits/chosen": 0.8680498003959656,
269
+ "logits/rejected": 0.9332998394966125,
270
+ "logps/chosen": -72.52728271484375,
271
+ "logps/rejected": -81.39039611816406,
272
  "loss": 0.0099,
273
+ "rewards/accuracies": 0.3062500059604645,
274
+ "rewards/chosen": -0.0010830673854798079,
275
+ "rewards/margins": 0.0011036808136850595,
276
+ "rewards/rejected": -0.0021867481991648674,
277
  "step": 170
278
  },
279
  {
280
+ "epoch": 0.38,
281
+ "learning_rate": 4.882681251368549e-06,
282
+ "logits/chosen": 0.7853900790214539,
283
+ "logits/rejected": 0.8009947538375854,
284
+ "logps/chosen": -98.69686889648438,
285
+ "logps/rejected": -94.05746459960938,
286
  "loss": 0.0099,
287
+ "rewards/accuracies": 0.30000001192092896,
288
+ "rewards/chosen": -0.0003774884680751711,
289
+ "rewards/margins": 0.0011976181995123625,
290
+ "rewards/rejected": -0.001575106754899025,
291
  "step": 180
292
  },
293
  {
294
+ "epoch": 0.4,
295
+ "learning_rate": 4.853377929214243e-06,
296
+ "logits/chosen": 0.7380321621894836,
297
+ "logits/rejected": 0.8313556909561157,
298
+ "logps/chosen": -97.42859649658203,
299
+ "logps/rejected": -107.14707946777344,
300
+ "loss": 0.0098,
301
+ "rewards/accuracies": 0.33125001192092896,
302
+ "rewards/chosen": -0.0015280761290341616,
303
+ "rewards/margins": 0.0013618851080536842,
304
+ "rewards/rejected": -0.0028899614699184895,
305
  "step": 190
306
  },
307
  {
308
+ "epoch": 0.42,
309
+ "learning_rate": 4.8209198325401815e-06,
310
+ "logits/chosen": 0.7754073143005371,
311
+ "logits/rejected": 0.8368635177612305,
312
+ "logps/chosen": -77.53565216064453,
313
+ "logps/rejected": -86.88373565673828,
314
+ "loss": 0.0098,
315
+ "rewards/accuracies": 0.30000001192092896,
316
+ "rewards/chosen": -0.0016312580555677414,
317
+ "rewards/margins": 0.002017855178564787,
318
+ "rewards/rejected": -0.0036491132341325283,
319
+ "step": 200
320
+ },
321
+ {
322
+ "epoch": 0.42,
323
+ "eval_logits/chosen": 0.79280686378479,
324
+ "eval_logits/rejected": 0.8107121586799622,
325
+ "eval_logps/chosen": -91.90320587158203,
326
+ "eval_logps/rejected": -94.51907348632812,
327
+ "eval_loss": 0.009800148196518421,
328
+ "eval_rewards/accuracies": 0.3059999942779541,
329
+ "eval_rewards/chosen": -0.0017583017470315099,
330
+ "eval_rewards/margins": 0.0014622843591496348,
331
+ "eval_rewards/rejected": -0.0032205861061811447,
332
+ "eval_runtime": 274.5029,
333
+ "eval_samples_per_second": 7.286,
334
+ "eval_steps_per_second": 0.455,
335
+ "step": 200
336
+ },
337
+ {
338
+ "epoch": 0.44,
339
+ "learning_rate": 4.785350472409792e-06,
340
+ "logits/chosen": 0.8355112075805664,
341
+ "logits/rejected": 0.8323017358779907,
342
+ "logps/chosen": -93.525146484375,
343
+ "logps/rejected": -86.69541931152344,
344
+ "loss": 0.0098,
345
+ "rewards/accuracies": 0.2750000059604645,
346
+ "rewards/chosen": -0.002141474513337016,
347
+ "rewards/margins": 0.0012898927088826895,
348
+ "rewards/rejected": -0.0034313672222197056,
349
+ "step": 210
350
+ },
351
+ {
352
+ "epoch": 0.46,
353
+ "learning_rate": 4.746717530629565e-06,
354
+ "logits/chosen": 0.7978548407554626,
355
+ "logits/rejected": 0.8422772288322449,
356
+ "logps/chosen": -100.98792266845703,
357
+ "logps/rejected": -95.15986633300781,
358
+ "loss": 0.0099,
359
+ "rewards/accuracies": 0.32499998807907104,
360
+ "rewards/chosen": -0.0021895477548241615,
361
+ "rewards/margins": 0.0018103765323758125,
362
+ "rewards/rejected": -0.003999924287199974,
363
+ "step": 220
364
+ },
365
+ {
366
+ "epoch": 0.48,
367
+ "learning_rate": 4.7050727958301505e-06,
368
+ "logits/chosen": 0.8680189251899719,
369
+ "logits/rejected": 0.8407084345817566,
370
+ "logps/chosen": -88.7295913696289,
371
+ "logps/rejected": -93.2841796875,
372
+ "loss": 0.0098,
373
+ "rewards/accuracies": 0.3687500059604645,
374
+ "rewards/chosen": -0.0033182327169924974,
375
+ "rewards/margins": 0.001298791728913784,
376
+ "rewards/rejected": -0.004617024213075638,
377
+ "step": 230
378
+ },
379
+ {
380
+ "epoch": 0.5,
381
+ "learning_rate": 4.660472094042121e-06,
382
+ "logits/chosen": 0.7951023578643799,
383
+ "logits/rejected": 0.8105288743972778,
384
+ "logps/chosen": -115.34117126464844,
385
+ "logps/rejected": -101.9451904296875,
386
+ "loss": 0.0095,
387
+ "rewards/accuracies": 0.34375,
388
+ "rewards/chosen": -0.002927349414676428,
389
+ "rewards/margins": 0.0025170063599944115,
390
+ "rewards/rejected": -0.005444356240332127,
391
+ "step": 240
392
+ },
393
+ {
394
+ "epoch": 0.52,
395
+ "learning_rate": 4.612975213859487e-06,
396
+ "logits/chosen": 0.7722874879837036,
397
+ "logits/rejected": 0.836758017539978,
398
+ "logps/chosen": -116.63499450683594,
399
+ "logps/rejected": -107.69172668457031,
400
+ "loss": 0.0097,
401
+ "rewards/accuracies": 0.38749998807907104,
402
+ "rewards/chosen": -0.002974079456180334,
403
+ "rewards/margins": 0.002990330569446087,
404
+ "rewards/rejected": -0.005964409559965134,
405
+ "step": 250
406
+ },
407
+ {
408
+ "epoch": 0.54,
409
+ "learning_rate": 4.5626458262912745e-06,
410
+ "logits/chosen": 0.7915774583816528,
411
+ "logits/rejected": 0.8718246221542358,
412
+ "logps/chosen": -99.33454132080078,
413
+ "logps/rejected": -88.96430969238281,
414
+ "loss": 0.0095,
415
+ "rewards/accuracies": 0.3125,
416
+ "rewards/chosen": -0.00338442693464458,
417
+ "rewards/margins": 0.00286421668715775,
418
+ "rewards/rejected": -0.006248644087463617,
419
+ "step": 260
420
+ },
421
+ {
422
+ "epoch": 0.57,
423
+ "learning_rate": 4.509551399408598e-06,
424
+ "logits/chosen": 0.886553168296814,
425
+ "logits/rejected": 0.9329681396484375,
426
+ "logps/chosen": -114.57804870605469,
427
+ "logps/rejected": -83.83116149902344,
428
+ "loss": 0.0095,
429
+ "rewards/accuracies": 0.3375000059604645,
430
+ "rewards/chosen": -0.003462123218923807,
431
+ "rewards/margins": 0.003769775154069066,
432
+ "rewards/rejected": -0.007231898605823517,
433
+ "step": 270
434
+ },
435
+ {
436
+ "epoch": 0.59,
437
+ "learning_rate": 4.453763107901676e-06,
438
+ "logits/chosen": 0.8362187147140503,
439
+ "logits/rejected": 0.8351699113845825,
440
+ "logps/chosen": -121.24173736572266,
441
+ "logps/rejected": -109.759521484375,
442
+ "loss": 0.0095,
443
+ "rewards/accuracies": 0.35624998807907104,
444
+ "rewards/chosen": -0.004291792865842581,
445
+ "rewards/margins": 0.0028506286907941103,
446
+ "rewards/rejected": -0.007142421789467335,
447
+ "step": 280
448
+ },
449
+ {
450
+ "epoch": 0.61,
451
+ "learning_rate": 4.3953557376679856e-06,
452
+ "logits/chosen": 0.820317268371582,
453
+ "logits/rejected": 0.8414691686630249,
454
+ "logps/chosen": -95.08177185058594,
455
+ "logps/rejected": -92.17173767089844,
456
+ "loss": 0.0096,
457
+ "rewards/accuracies": 0.3499999940395355,
458
+ "rewards/chosen": -0.006106963846832514,
459
+ "rewards/margins": 0.0012445768807083368,
460
+ "rewards/rejected": -0.007351540960371494,
461
+ "step": 290
462
+ },
463
+ {
464
+ "epoch": 0.63,
465
+ "learning_rate": 4.33440758555951e-06,
466
+ "logits/chosen": 0.7827913165092468,
467
+ "logits/rejected": 0.8337699174880981,
468
+ "logps/chosen": -79.63487243652344,
469
+ "logps/rejected": -88.0806884765625,
470
+ "loss": 0.0095,
471
+ "rewards/accuracies": 0.32499998807907104,
472
+ "rewards/chosen": -0.0031589865684509277,
473
+ "rewards/margins": 0.0048677194863557816,
474
+ "rewards/rejected": -0.00802670605480671,
475
+ "step": 300
476
+ },
477
+ {
478
+ "epoch": 0.63,
479
+ "eval_logits/chosen": 0.7800281643867493,
480
+ "eval_logits/rejected": 0.7981991171836853,
481
+ "eval_logps/chosen": -92.30917358398438,
482
+ "eval_logps/rejected": -95.08193969726562,
483
+ "eval_loss": 0.009581359103322029,
484
+ "eval_rewards/accuracies": 0.3059999942779541,
485
+ "eval_rewards/chosen": -0.005818030331283808,
486
+ "eval_rewards/margins": 0.0030311874579638243,
487
+ "eval_rewards/rejected": -0.008849218487739563,
488
+ "eval_runtime": 274.4906,
489
+ "eval_samples_per_second": 7.286,
490
+ "eval_steps_per_second": 0.455,
491
+ "step": 300
492
+ },
493
+ {
494
+ "epoch": 0.65,
495
+ "learning_rate": 4.2710003544234255e-06,
496
+ "logits/chosen": 0.7880622744560242,
497
+ "logits/rejected": 0.8204299211502075,
498
+ "logps/chosen": -86.09496307373047,
499
+ "logps/rejected": -87.77622985839844,
500
+ "loss": 0.0095,
501
+ "rewards/accuracies": 0.3375000059604645,
502
+ "rewards/chosen": -0.006854506675153971,
503
+ "rewards/margins": 0.0034410678781569004,
504
+ "rewards/rejected": -0.010295574553310871,
505
+ "step": 310
506
+ },
507
+ {
508
+ "epoch": 0.67,
509
+ "learning_rate": 4.205219043576955e-06,
510
+ "logits/chosen": 0.7853392958641052,
511
+ "logits/rejected": 0.8362796902656555,
512
+ "logps/chosen": -88.15892028808594,
513
+ "logps/rejected": -91.532470703125,
514
+ "loss": 0.0096,
515
+ "rewards/accuracies": 0.375,
516
+ "rewards/chosen": -0.0067759170196950436,
517
+ "rewards/margins": 0.003778536571189761,
518
+ "rewards/rejected": -0.010554454289376736,
519
+ "step": 320
520
+ },
521
+ {
522
+ "epoch": 0.69,
523
+ "learning_rate": 4.137151834863213e-06,
524
+ "logits/chosen": 0.7346684336662292,
525
+ "logits/rejected": 0.7727020382881165,
526
+ "logps/chosen": -101.71944427490234,
527
+ "logps/rejected": -102.30587005615234,
528
+ "loss": 0.0095,
529
+ "rewards/accuracies": 0.33125001192092896,
530
+ "rewards/chosen": -0.007574302610009909,
531
+ "rewards/margins": 0.004506202414631844,
532
+ "rewards/rejected": -0.012080504558980465,
533
+ "step": 330
534
+ },
535
+ {
536
+ "epoch": 0.71,
537
+ "learning_rate": 4.066889974440757e-06,
538
+ "logits/chosen": 0.7809281945228577,
539
+ "logits/rejected": 0.8105946779251099,
540
+ "logps/chosen": -83.26361846923828,
541
+ "logps/rejected": -73.31196594238281,
542
+ "loss": 0.0093,
543
+ "rewards/accuracies": 0.3375000059604645,
544
+ "rewards/chosen": -0.004678535740822554,
545
+ "rewards/margins": 0.005057544447481632,
546
+ "rewards/rejected": -0.009736080653965473,
547
+ "step": 340
548
+ },
549
+ {
550
+ "epoch": 0.73,
551
+ "learning_rate": 3.994527650465352e-06,
552
+ "logits/chosen": 0.8511131405830383,
553
+ "logits/rejected": 0.8692939877510071,
554
+ "logps/chosen": -89.02024841308594,
555
+ "logps/rejected": -83.25767517089844,
556
+ "loss": 0.0093,
557
+ "rewards/accuracies": 0.4000000059604645,
558
+ "rewards/chosen": -0.009549392387270927,
559
+ "rewards/margins": 0.004139441065490246,
560
+ "rewards/rejected": -0.013688832521438599,
561
+ "step": 350
562
+ },
563
+ {
564
+ "epoch": 0.75,
565
+ "learning_rate": 3.92016186682789e-06,
566
+ "logits/chosen": 0.7794450521469116,
567
+ "logits/rejected": 0.7929707765579224,
568
+ "logps/chosen": -95.35264587402344,
569
+ "logps/rejected": -99.0013198852539,
570
+ "loss": 0.0093,
571
+ "rewards/accuracies": 0.4000000059604645,
572
+ "rewards/chosen": -0.009752290323376656,
573
+ "rewards/margins": 0.005257262382656336,
574
+ "rewards/rejected": -0.015009550377726555,
575
+ "step": 360
576
+ },
577
+ {
578
+ "epoch": 0.77,
579
+ "learning_rate": 3.843892313117724e-06,
580
+ "logits/chosen": 0.8562378883361816,
581
+ "logits/rejected": 0.8364348411560059,
582
+ "logps/chosen": -108.0776596069336,
583
+ "logps/rejected": -104.81854248046875,
584
+ "loss": 0.0095,
585
+ "rewards/accuracies": 0.3687500059604645,
586
+ "rewards/chosen": -0.008355715312063694,
587
+ "rewards/margins": 0.0053038811311125755,
588
+ "rewards/rejected": -0.01365959644317627,
589
+ "step": 370
590
+ },
591
+ {
592
+ "epoch": 0.8,
593
+ "learning_rate": 3.7658212309857576e-06,
594
+ "logits/chosen": 0.7989660501480103,
595
+ "logits/rejected": 0.8410453796386719,
596
+ "logps/chosen": -102.3397216796875,
597
+ "logps/rejected": -88.8924789428711,
598
+ "loss": 0.0091,
599
+ "rewards/accuracies": 0.3812499940395355,
600
+ "rewards/chosen": -0.008961253799498081,
601
+ "rewards/margins": 0.006044700741767883,
602
+ "rewards/rejected": -0.015005955472588539,
603
+ "step": 380
604
+ },
605
+ {
606
+ "epoch": 0.82,
607
+ "learning_rate": 3.686053277086401e-06,
608
+ "logits/chosen": 0.7839023470878601,
609
+ "logits/rejected": 0.8361980319023132,
610
+ "logps/chosen": -110.81888580322266,
611
+ "logps/rejected": -100.21189880371094,
612
+ "loss": 0.0091,
613
+ "rewards/accuracies": 0.39375001192092896,
614
+ "rewards/chosen": -0.007862605154514313,
615
+ "rewards/margins": 0.007173668593168259,
616
+ "rewards/rejected": -0.015036274679005146,
617
+ "step": 390
618
+ },
619
+ {
620
+ "epoch": 0.84,
621
+ "learning_rate": 3.604695382782159e-06,
622
+ "logits/chosen": 0.7966210246086121,
623
+ "logits/rejected": 0.82738196849823,
624
+ "logps/chosen": -100.70481872558594,
625
+ "logps/rejected": -103.6056137084961,
626
+ "loss": 0.0091,
627
+ "rewards/accuracies": 0.34375,
628
+ "rewards/chosen": -0.01019311510026455,
629
+ "rewards/margins": 0.006234516389667988,
630
+ "rewards/rejected": -0.01642763242125511,
631
+ "step": 400
632
+ },
633
+ {
634
+ "epoch": 0.84,
635
+ "eval_logits/chosen": 0.7564685344696045,
636
+ "eval_logits/rejected": 0.7753021121025085,
637
+ "eval_logps/chosen": -92.824951171875,
638
+ "eval_logps/rejected": -95.7641830444336,
639
+ "eval_loss": 0.009432977996766567,
640
+ "eval_rewards/accuracies": 0.33399999141693115,
641
+ "eval_rewards/chosen": -0.010975906625390053,
642
+ "eval_rewards/margins": 0.004695890471339226,
643
+ "eval_rewards/rejected": -0.01567179709672928,
644
+ "eval_runtime": 274.283,
645
+ "eval_samples_per_second": 7.292,
646
+ "eval_steps_per_second": 0.456,
647
+ "step": 400
648
+ },
649
+ {
650
+ "epoch": 0.86,
651
+ "learning_rate": 3.5218566107988872e-06,
652
+ "logits/chosen": 0.7925983667373657,
653
+ "logits/rejected": 0.7887567281723022,
654
+ "logps/chosen": -96.2036361694336,
655
+ "logps/rejected": -81.12339782714844,
656
+ "loss": 0.0095,
657
+ "rewards/accuracies": 0.3375000059604645,
658
+ "rewards/chosen": -0.011769723147153854,
659
+ "rewards/margins": 0.004597696475684643,
660
+ "rewards/rejected": -0.016367420554161072,
661
+ "step": 410
662
+ },
663
+ {
664
+ "epoch": 0.88,
665
+ "learning_rate": 3.437648009023905e-06,
666
+ "logits/chosen": 0.7313151359558105,
667
+ "logits/rejected": 0.791450560092926,
668
+ "logps/chosen": -96.06840515136719,
669
+ "logps/rejected": -98.45843505859375,
670
+ "loss": 0.0095,
671
+ "rewards/accuracies": 0.3375000059604645,
672
+ "rewards/chosen": -0.012706448324024677,
673
+ "rewards/margins": 0.004676566459238529,
674
+ "rewards/rejected": -0.017383014783263206,
675
+ "step": 420
676
+ },
677
+ {
678
+ "epoch": 0.9,
679
+ "learning_rate": 3.352182461642929e-06,
680
+ "logits/chosen": 0.836865246295929,
681
+ "logits/rejected": 0.8605507612228394,
682
+ "logps/chosen": -83.78216552734375,
683
+ "logps/rejected": -90.726806640625,
684
+ "loss": 0.0095,
685
+ "rewards/accuracies": 0.4000000059604645,
686
+ "rewards/chosen": -0.010321888141334057,
687
+ "rewards/margins": 0.0064377314411103725,
688
+ "rewards/rejected": -0.01675962097942829,
689
+ "step": 430
690
+ },
691
+ {
692
+ "epoch": 0.92,
693
+ "learning_rate": 3.265574537815398e-06,
694
+ "logits/chosen": 0.7546414136886597,
695
+ "logits/rejected": 0.8063668012619019,
696
+ "logps/chosen": -110.22758483886719,
697
+ "logps/rejected": -82.65476989746094,
698
+ "loss": 0.0094,
699
+ "rewards/accuracies": 0.33125001192092896,
700
+ "rewards/chosen": -0.009618332609534264,
701
+ "rewards/margins": 0.007489732000976801,
702
+ "rewards/rejected": -0.017108064144849777,
703
+ "step": 440
704
+ },
705
+ {
706
+ "epoch": 0.94,
707
+ "learning_rate": 3.177940338091043e-06,
708
+ "logits/chosen": 0.7461640238761902,
709
+ "logits/rejected": 0.8213142156600952,
710
+ "logps/chosen": -81.55519104003906,
711
+ "logps/rejected": -72.42239379882812,
712
+ "loss": 0.0093,
713
+ "rewards/accuracies": 0.3125,
714
+ "rewards/chosen": -0.009043378755450249,
715
+ "rewards/margins": 0.005746514070779085,
716
+ "rewards/rejected": -0.014789892360568047,
717
+ "step": 450
718
+ },
719
+ {
720
+ "epoch": 0.96,
721
+ "learning_rate": 3.089397338773569e-06,
722
+ "logits/chosen": 0.7719414830207825,
723
+ "logits/rejected": 0.8252090215682983,
724
+ "logps/chosen": -106.2469482421875,
725
+ "logps/rejected": -88.18677520751953,
726
+ "loss": 0.0092,
727
+ "rewards/accuracies": 0.3187499940395355,
728
+ "rewards/chosen": -0.009636683389544487,
729
+ "rewards/margins": 0.008729028515517712,
730
+ "rewards/rejected": -0.018365712836384773,
731
+ "step": 460
732
+ },
733
+ {
734
+ "epoch": 0.98,
735
+ "learning_rate": 3.0000642344401115e-06,
736
+ "logits/chosen": 0.8243037462234497,
737
+ "logits/rejected": 0.8203719854354858,
738
+ "logps/chosen": -116.20052337646484,
739
+ "logps/rejected": -111.63053131103516,
740
+ "loss": 0.0091,
741
+ "rewards/accuracies": 0.39375001192092896,
742
+ "rewards/chosen": -0.01138549018651247,
743
+ "rewards/margins": 0.00896347127854824,
744
+ "rewards/rejected": -0.020348962396383286,
745
+ "step": 470
746
+ },
747
+ {
748
+ "epoch": 1.0,
749
+ "learning_rate": 2.9100607788275547e-06,
750
+ "logits/chosen": 0.8390713930130005,
751
+ "logits/rejected": 0.8485578298568726,
752
+ "logps/chosen": -76.21663665771484,
753
+ "logps/rejected": -66.13563537597656,
754
+ "loss": 0.0095,
755
+ "rewards/accuracies": 0.26249998807907104,
756
+ "rewards/chosen": -0.011686747893691063,
757
+ "rewards/margins": 0.003907538950443268,
758
+ "rewards/rejected": -0.01559428684413433,
759
+ "step": 480
760
+ },
761
+ {
762
+ "epoch": 1.03,
763
+ "learning_rate": 2.8195076242990124e-06,
764
+ "logits/chosen": 0.781129777431488,
765
+ "logits/rejected": 0.8074111938476562,
766
+ "logps/chosen": -93.35603332519531,
767
+ "logps/rejected": -100.03508758544922,
768
+ "loss": 0.0093,
769
+ "rewards/accuracies": 0.375,
770
+ "rewards/chosen": -0.014726340770721436,
771
+ "rewards/margins": 0.004594289697706699,
772
+ "rewards/rejected": -0.01932062767446041,
773
+ "step": 490
774
+ },
775
+ {
776
+ "epoch": 1.05,
777
+ "learning_rate": 2.72852616010567e-06,
778
+ "logits/chosen": 0.8007243871688843,
779
+ "logits/rejected": 0.7850581407546997,
780
+ "logps/chosen": -102.482177734375,
781
+ "logps/rejected": -102.58174133300781,
782
+ "loss": 0.0094,
783
+ "rewards/accuracies": 0.4000000059604645,
784
+ "rewards/chosen": -0.012946246191859245,
785
+ "rewards/margins": 0.009540179744362831,
786
+ "rewards/rejected": -0.022486427798867226,
787
+ "step": 500
788
+ },
789
+ {
790
+ "epoch": 1.05,
791
+ "eval_logits/chosen": 0.7492075562477112,
792
+ "eval_logits/rejected": 0.7678821682929993,
793
+ "eval_logps/chosen": -93.04632568359375,
794
+ "eval_logps/rejected": -96.11502838134766,
795
+ "eval_loss": 0.009345741011202335,
796
+ "eval_rewards/accuracies": 0.3400000035762787,
797
+ "eval_rewards/chosen": -0.013189575634896755,
798
+ "eval_rewards/margins": 0.0059906188398599625,
799
+ "eval_rewards/rejected": -0.019180195406079292,
800
+ "eval_runtime": 272.2568,
801
+ "eval_samples_per_second": 7.346,
802
+ "eval_steps_per_second": 0.459,
803
+ "step": 500
804
+ },
805
+ {
806
+ "epoch": 1.07,
807
+ "learning_rate": 2.637238349660819e-06,
808
+ "logits/chosen": 0.8036779165267944,
809
+ "logits/rejected": 0.8043034672737122,
810
+ "logps/chosen": -92.55281066894531,
811
+ "logps/rejected": -90.49037170410156,
812
+ "loss": 0.0091,
813
+ "rewards/accuracies": 0.34375,
814
+ "rewards/chosen": -0.014594177715480328,
815
+ "rewards/margins": 0.005402697250247002,
816
+ "rewards/rejected": -0.019996874034404755,
817
+ "step": 510
818
+ },
819
+ {
820
+ "epoch": 1.09,
821
+ "learning_rate": 2.5457665670441937e-06,
822
+ "logits/chosen": 0.8009434938430786,
823
+ "logits/rejected": 0.8440135717391968,
824
+ "logps/chosen": -103.445068359375,
825
+ "logps/rejected": -94.67227935791016,
826
+ "loss": 0.0091,
827
+ "rewards/accuracies": 0.35624998807907104,
828
+ "rewards/chosen": -0.012103055603802204,
829
+ "rewards/margins": 0.008832341991364956,
830
+ "rewards/rejected": -0.02093539759516716,
831
+ "step": 520
832
+ },
833
+ {
834
+ "epoch": 1.11,
835
+ "learning_rate": 2.4542334329558075e-06,
836
+ "logits/chosen": 0.7876957058906555,
837
+ "logits/rejected": 0.8697893023490906,
838
+ "logps/chosen": -108.23606872558594,
839
+ "logps/rejected": -90.39962768554688,
840
+ "loss": 0.0093,
841
+ "rewards/accuracies": 0.3812499940395355,
842
+ "rewards/chosen": -0.012823512777686119,
843
+ "rewards/margins": 0.00771692767739296,
844
+ "rewards/rejected": -0.02054044045507908,
845
+ "step": 530
846
+ },
847
+ {
848
+ "epoch": 1.13,
849
+ "learning_rate": 2.3627616503391813e-06,
850
+ "logits/chosen": 0.7362729907035828,
851
+ "logits/rejected": 0.7685662508010864,
852
+ "logps/chosen": -103.24140930175781,
853
+ "logps/rejected": -88.57923889160156,
854
+ "loss": 0.0092,
855
+ "rewards/accuracies": 0.33125001192092896,
856
+ "rewards/chosen": -0.013577492907643318,
857
+ "rewards/margins": 0.006500400602817535,
858
+ "rewards/rejected": -0.020077891647815704,
859
+ "step": 540
860
+ },
861
+ {
862
+ "epoch": 1.15,
863
+ "learning_rate": 2.271473839894331e-06,
864
+ "logits/chosen": 0.727995753288269,
865
+ "logits/rejected": 0.7580054402351379,
866
+ "logps/chosen": -92.93126678466797,
867
+ "logps/rejected": -84.96773529052734,
868
+ "loss": 0.0093,
869
+ "rewards/accuracies": 0.33125001192092896,
870
+ "rewards/chosen": -0.012479904107749462,
871
+ "rewards/margins": 0.006294636521488428,
872
+ "rewards/rejected": -0.018774541094899178,
873
+ "step": 550
874
+ },
875
+ {
876
+ "epoch": 1.17,
877
+ "learning_rate": 2.1804923757009885e-06,
878
+ "logits/chosen": 0.7571858167648315,
879
+ "logits/rejected": 0.8027281761169434,
880
+ "logps/chosen": -117.69078063964844,
881
+ "logps/rejected": -117.47624206542969,
882
+ "loss": 0.0091,
883
+ "rewards/accuracies": 0.4124999940395355,
884
+ "rewards/chosen": -0.014920057728886604,
885
+ "rewards/margins": 0.00889978464692831,
886
+ "rewards/rejected": -0.02381983958184719,
887
+ "step": 560
888
+ },
889
+ {
890
+ "epoch": 1.19,
891
+ "learning_rate": 2.089939221172446e-06,
892
+ "logits/chosen": 0.7476423382759094,
893
+ "logits/rejected": 0.7867797017097473,
894
+ "logps/chosen": -96.03732299804688,
895
+ "logps/rejected": -93.63196563720703,
896
+ "loss": 0.0094,
897
+ "rewards/accuracies": 0.36250001192092896,
898
+ "rewards/chosen": -0.014638627879321575,
899
+ "rewards/margins": 0.007565396372228861,
900
+ "rewards/rejected": -0.022204022854566574,
901
+ "step": 570
902
+ },
903
+ {
904
+ "epoch": 1.21,
905
+ "learning_rate": 1.9999357655598894e-06,
906
+ "logits/chosen": 0.7799301147460938,
907
+ "logits/rejected": 0.7575241327285767,
908
+ "logps/chosen": -106.30534362792969,
909
+ "logps/rejected": -101.278564453125,
910
+ "loss": 0.0095,
911
+ "rewards/accuracies": 0.33125001192092896,
912
+ "rewards/chosen": -0.015909461304545403,
913
+ "rewards/margins": 0.0034894272685050964,
914
+ "rewards/rejected": -0.01939888671040535,
915
+ "step": 580
916
+ },
917
+ {
918
+ "epoch": 1.23,
919
+ "learning_rate": 1.9106026612264316e-06,
920
+ "logits/chosen": 0.8206084370613098,
921
+ "logits/rejected": 0.8035517930984497,
922
+ "logps/chosen": -82.16791534423828,
923
+ "logps/rejected": -81.92047882080078,
924
+ "loss": 0.0091,
925
+ "rewards/accuracies": 0.32499998807907104,
926
+ "rewards/chosen": -0.015170453116297722,
927
+ "rewards/margins": 0.006062434054911137,
928
+ "rewards/rejected": -0.021232888102531433,
929
+ "step": 590
930
+ },
931
+ {
932
+ "epoch": 1.26,
933
+ "learning_rate": 1.8220596619089576e-06,
934
+ "logits/chosen": 0.7637159824371338,
935
+ "logits/rejected": 0.7464240789413452,
936
+ "logps/chosen": -107.59675598144531,
937
+ "logps/rejected": -108.54317474365234,
938
+ "loss": 0.0093,
939
+ "rewards/accuracies": 0.38749998807907104,
940
+ "rewards/chosen": -0.016024868935346603,
941
+ "rewards/margins": 0.006444328930228949,
942
+ "rewards/rejected": -0.02246919646859169,
943
+ "step": 600
944
+ },
945
+ {
946
+ "epoch": 1.26,
947
+ "eval_logits/chosen": 0.7382512092590332,
948
+ "eval_logits/rejected": 0.7577717900276184,
949
+ "eval_logps/chosen": -93.16767120361328,
950
+ "eval_logps/rejected": -96.26305389404297,
951
+ "eval_loss": 0.00932270660996437,
952
+ "eval_rewards/accuracies": 0.3440000116825104,
953
+ "eval_rewards/chosen": -0.014403086155653,
954
+ "eval_rewards/margins": 0.006257344502955675,
955
+ "eval_rewards/rejected": -0.020660430192947388,
956
+ "eval_runtime": 272.2545,
957
+ "eval_samples_per_second": 7.346,
958
+ "eval_steps_per_second": 0.459,
959
+ "step": 600
960
+ },
961
+ {
962
+ "epoch": 1.28,
963
+ "learning_rate": 1.7344254621846018e-06,
964
+ "logits/chosen": 0.7539538145065308,
965
+ "logits/rejected": 0.773404598236084,
966
+ "logps/chosen": -114.31193542480469,
967
+ "logps/rejected": -104.0637435913086,
968
+ "loss": 0.0091,
969
+ "rewards/accuracies": 0.35624998807907104,
970
+ "rewards/chosen": -0.013692615553736687,
971
+ "rewards/margins": 0.007563846651464701,
972
+ "rewards/rejected": -0.02125646360218525,
973
+ "step": 610
974
+ },
975
+ {
976
+ "epoch": 1.3,
977
+ "learning_rate": 1.647817538357072e-06,
978
+ "logits/chosen": 0.7876811027526855,
979
+ "logits/rejected": 0.8452059626579285,
980
+ "logps/chosen": -104.75425720214844,
981
+ "logps/rejected": -99.91309356689453,
982
+ "loss": 0.0092,
983
+ "rewards/accuracies": 0.38749998807907104,
984
+ "rewards/chosen": -0.016282986849546432,
985
+ "rewards/margins": 0.007685069926083088,
986
+ "rewards/rejected": -0.023968055844306946,
987
+ "step": 620
988
+ },
989
+ {
990
+ "epoch": 1.32,
991
+ "learning_rate": 1.5623519909760953e-06,
992
+ "logits/chosen": 0.7141858339309692,
993
+ "logits/rejected": 0.7543732523918152,
994
+ "logps/chosen": -94.53080749511719,
995
+ "logps/rejected": -90.2872085571289,
996
+ "loss": 0.0091,
997
+ "rewards/accuracies": 0.3125,
998
+ "rewards/chosen": -0.014917632564902306,
999
+ "rewards/margins": 0.005212970077991486,
1000
+ "rewards/rejected": -0.02013060264289379,
1001
+ "step": 630
1002
+ },
1003
+ {
1004
+ "epoch": 1.34,
1005
+ "learning_rate": 1.4781433892011132e-06,
1006
+ "logits/chosen": 0.7889922261238098,
1007
+ "logits/rejected": 0.8183968663215637,
1008
+ "logps/chosen": -90.72172546386719,
1009
+ "logps/rejected": -97.16673278808594,
1010
+ "loss": 0.0091,
1011
+ "rewards/accuracies": 0.35624998807907104,
1012
+ "rewards/chosen": -0.013522952795028687,
1013
+ "rewards/margins": 0.00625986885279417,
1014
+ "rewards/rejected": -0.019782820716500282,
1015
+ "step": 640
1016
+ },
1017
+ {
1018
+ "epoch": 1.36,
1019
+ "learning_rate": 1.3953046172178413e-06,
1020
+ "logits/chosen": 0.7950395345687866,
1021
+ "logits/rejected": 0.851498007774353,
1022
+ "logps/chosen": -101.07930755615234,
1023
+ "logps/rejected": -114.48579406738281,
1024
+ "loss": 0.0093,
1025
+ "rewards/accuracies": 0.4000000059604645,
1026
+ "rewards/chosen": -0.016296306625008583,
1027
+ "rewards/margins": 0.008597773499786854,
1028
+ "rewards/rejected": -0.024894079193472862,
1029
+ "step": 650
1030
+ },
1031
+ {
1032
+ "epoch": 1.38,
1033
+ "learning_rate": 1.3139467229135999e-06,
1034
+ "logits/chosen": 0.7387314438819885,
1035
+ "logits/rejected": 0.7517582178115845,
1036
+ "logps/chosen": -80.6947250366211,
1037
+ "logps/rejected": -86.94942474365234,
1038
+ "loss": 0.0089,
1039
+ "rewards/accuracies": 0.3812499940395355,
1040
+ "rewards/chosen": -0.01321180909872055,
1041
+ "rewards/margins": 0.0073172166012227535,
1042
+ "rewards/rejected": -0.02052902616560459,
1043
+ "step": 660
1044
+ },
1045
+ {
1046
+ "epoch": 1.4,
1047
+ "learning_rate": 1.2341787690142436e-06,
1048
+ "logits/chosen": 0.6672351956367493,
1049
+ "logits/rejected": 0.6918442249298096,
1050
+ "logps/chosen": -115.32958984375,
1051
+ "logps/rejected": -96.97526550292969,
1052
+ "loss": 0.0094,
1053
+ "rewards/accuracies": 0.35624998807907104,
1054
+ "rewards/chosen": -0.014516430906951427,
1055
+ "rewards/margins": 0.009004795923829079,
1056
+ "rewards/rejected": -0.02352122589945793,
1057
+ "step": 670
1058
+ },
1059
+ {
1060
+ "epoch": 1.42,
1061
+ "learning_rate": 1.1561076868822756e-06,
1062
+ "logits/chosen": 0.6968938708305359,
1063
+ "logits/rejected": 0.728511393070221,
1064
+ "logps/chosen": -76.32088470458984,
1065
+ "logps/rejected": -81.96578979492188,
1066
+ "loss": 0.0092,
1067
+ "rewards/accuracies": 0.34375,
1068
+ "rewards/chosen": -0.015820499509572983,
1069
+ "rewards/margins": 0.004979898687452078,
1070
+ "rewards/rejected": -0.0208003968000412,
1071
+ "step": 680
1072
+ },
1073
+ {
1074
+ "epoch": 1.44,
1075
+ "learning_rate": 1.079838133172111e-06,
1076
+ "logits/chosen": 0.7537049055099487,
1077
+ "logits/rejected": 0.8117996454238892,
1078
+ "logps/chosen": -94.59342956542969,
1079
+ "logps/rejected": -80.20372009277344,
1080
+ "loss": 0.0093,
1081
+ "rewards/accuracies": 0.3499999940395355,
1082
+ "rewards/chosen": -0.014598280191421509,
1083
+ "rewards/margins": 0.008172960951924324,
1084
+ "rewards/rejected": -0.022771239280700684,
1085
+ "step": 690
1086
+ },
1087
+ {
1088
+ "epoch": 1.47,
1089
+ "learning_rate": 1.0054723495346484e-06,
1090
+ "logits/chosen": 0.7987161874771118,
1091
+ "logits/rejected": 0.8007674217224121,
1092
+ "logps/chosen": -105.23921203613281,
1093
+ "logps/rejected": -97.75834655761719,
1094
+ "loss": 0.009,
1095
+ "rewards/accuracies": 0.375,
1096
+ "rewards/chosen": -0.017922451719641685,
1097
+ "rewards/margins": 0.008403345942497253,
1098
+ "rewards/rejected": -0.026325801387429237,
1099
+ "step": 700
1100
+ },
1101
+ {
1102
+ "epoch": 1.47,
1103
+ "eval_logits/chosen": 0.7355116009712219,
1104
+ "eval_logits/rejected": 0.7544857859611511,
1105
+ "eval_logps/chosen": -93.24905395507812,
1106
+ "eval_logps/rejected": -96.31977844238281,
1107
+ "eval_loss": 0.009318255819380283,
1108
+ "eval_rewards/accuracies": 0.3479999899864197,
1109
+ "eval_rewards/chosen": -0.015216803178191185,
1110
+ "eval_rewards/margins": 0.006010868586599827,
1111
+ "eval_rewards/rejected": -0.021227672696113586,
1112
+ "eval_runtime": 272.5589,
1113
+ "eval_samples_per_second": 7.338,
1114
+ "eval_steps_per_second": 0.459,
1115
+ "step": 700
1116
+ },
1117
+ {
1118
+ "epoch": 1.49,
1119
+ "learning_rate": 9.331100255592437e-07,
1120
+ "logits/chosen": 0.7550870180130005,
1121
+ "logits/rejected": 0.8333718180656433,
1122
+ "logps/chosen": -88.22233581542969,
1123
+ "logps/rejected": -81.27799224853516,
1124
+ "loss": 0.0091,
1125
+ "rewards/accuracies": 0.33125001192092896,
1126
+ "rewards/chosen": -0.01261846162378788,
1127
+ "rewards/margins": 0.007095014210790396,
1128
+ "rewards/rejected": -0.019713478162884712,
1129
+ "step": 710
1130
+ },
1131
+ {
1132
+ "epoch": 1.51,
1133
+ "learning_rate": 8.628481651367876e-07,
1134
+ "logits/chosen": 0.7663224339485168,
1135
+ "logits/rejected": 0.8483554720878601,
1136
+ "logps/chosen": -112.94798278808594,
1137
+ "logps/rejected": -100.04948425292969,
1138
+ "loss": 0.0091,
1139
+ "rewards/accuracies": 0.375,
1140
+ "rewards/chosen": -0.014603344723582268,
1141
+ "rewards/margins": 0.008872651495039463,
1142
+ "rewards/rejected": -0.023475993424654007,
1143
+ "step": 720
1144
+ },
1145
+ {
1146
+ "epoch": 1.53,
1147
+ "learning_rate": 7.947809564230446e-07,
1148
+ "logits/chosen": 0.7916958928108215,
1149
+ "logits/rejected": 0.8508380651473999,
1150
+ "logps/chosen": -96.55794525146484,
1151
+ "logps/rejected": -95.86845397949219,
1152
+ "loss": 0.0092,
1153
+ "rewards/accuracies": 0.4124999940395355,
1154
+ "rewards/chosen": -0.017020542174577713,
1155
+ "rewards/margins": 0.006455309689044952,
1156
+ "rewards/rejected": -0.023475851863622665,
1157
+ "step": 730
1158
+ },
1159
+ {
1160
+ "epoch": 1.55,
1161
+ "learning_rate": 7.289996455765749e-07,
1162
+ "logits/chosen": 0.716216504573822,
1163
+ "logits/rejected": 0.7626134157180786,
1164
+ "logps/chosen": -87.54243469238281,
1165
+ "logps/rejected": -81.1460952758789,
1166
+ "loss": 0.0094,
1167
+ "rewards/accuracies": 0.28125,
1168
+ "rewards/chosen": -0.012193633243441582,
1169
+ "rewards/margins": 0.004410223104059696,
1170
+ "rewards/rejected": -0.016603857278823853,
1171
+ "step": 740
1172
+ },
1173
+ {
1174
+ "epoch": 1.57,
1175
+ "learning_rate": 6.655924144404907e-07,
1176
+ "logits/chosen": 0.7234289050102234,
1177
+ "logits/rejected": 0.7377198338508606,
1178
+ "logps/chosen": -89.62705993652344,
1179
+ "logps/rejected": -86.00987243652344,
1180
+ "loss": 0.0092,
1181
+ "rewards/accuracies": 0.36250001192092896,
1182
+ "rewards/chosen": -0.012271616607904434,
1183
+ "rewards/margins": 0.00808991864323616,
1184
+ "rewards/rejected": -0.020361537113785744,
1185
+ "step": 750
1186
+ },
1187
+ {
1188
+ "epoch": 1.59,
1189
+ "learning_rate": 6.046442623320145e-07,
1190
+ "logits/chosen": 0.7715792655944824,
1191
+ "logits/rejected": 0.803941547870636,
1192
+ "logps/chosen": -109.85160827636719,
1193
+ "logps/rejected": -105.02885437011719,
1194
+ "loss": 0.0091,
1195
+ "rewards/accuracies": 0.375,
1196
+ "rewards/chosen": -0.018752435222268105,
1197
+ "rewards/margins": 0.005930652376264334,
1198
+ "rewards/rejected": -0.024683088064193726,
1199
+ "step": 760
1200
+ },
1201
+ {
1202
+ "epoch": 1.61,
1203
+ "learning_rate": 5.462368920983249e-07,
1204
+ "logits/chosen": 0.7687041759490967,
1205
+ "logits/rejected": 0.7833288311958313,
1206
+ "logps/chosen": -85.72100067138672,
1207
+ "logps/rejected": -73.33113098144531,
1208
+ "loss": 0.0092,
1209
+ "rewards/accuracies": 0.34375,
1210
+ "rewards/chosen": -0.013071869499981403,
1211
+ "rewards/margins": 0.006707571446895599,
1212
+ "rewards/rejected": -0.019779440015554428,
1213
+ "step": 770
1214
+ },
1215
+ {
1216
+ "epoch": 1.63,
1217
+ "learning_rate": 4.904486005914027e-07,
1218
+ "logits/chosen": 0.7501081228256226,
1219
+ "logits/rejected": 0.7631546258926392,
1220
+ "logps/chosen": -118.79168701171875,
1221
+ "logps/rejected": -109.3132095336914,
1222
+ "loss": 0.0089,
1223
+ "rewards/accuracies": 0.39375001192092896,
1224
+ "rewards/chosen": -0.01289316825568676,
1225
+ "rewards/margins": 0.010348210111260414,
1226
+ "rewards/rejected": -0.023241376504302025,
1227
+ "step": 780
1228
+ },
1229
+ {
1230
+ "epoch": 1.65,
1231
+ "learning_rate": 4.373541737087264e-07,
1232
+ "logits/chosen": 0.7186606526374817,
1233
+ "logits/rejected": 0.7436104416847229,
1234
+ "logps/chosen": -101.85713958740234,
1235
+ "logps/rejected": -97.64336395263672,
1236
+ "loss": 0.0094,
1237
+ "rewards/accuracies": 0.3187499940395355,
1238
+ "rewards/chosen": -0.015154453925788403,
1239
+ "rewards/margins": 0.006247458979487419,
1240
+ "rewards/rejected": -0.021401915699243546,
1241
+ "step": 790
1242
+ },
1243
+ {
1244
+ "epoch": 1.67,
1245
+ "learning_rate": 3.8702478614051353e-07,
1246
+ "logits/chosen": 0.7555572986602783,
1247
+ "logits/rejected": 0.7140852808952332,
1248
+ "logps/chosen": -100.46399688720703,
1249
+ "logps/rejected": -92.70053100585938,
1250
+ "loss": 0.009,
1251
+ "rewards/accuracies": 0.42500001192092896,
1252
+ "rewards/chosen": -0.013208119198679924,
1253
+ "rewards/margins": 0.012039312161505222,
1254
+ "rewards/rejected": -0.02524743042886257,
1255
+ "step": 800
1256
+ },
1257
+ {
1258
+ "epoch": 1.67,
1259
+ "eval_logits/chosen": 0.732753574848175,
1260
+ "eval_logits/rejected": 0.7523351311683655,
1261
+ "eval_logps/chosen": -93.27485656738281,
1262
+ "eval_logps/rejected": -96.37907409667969,
1263
+ "eval_loss": 0.009285110980272293,
1264
+ "eval_rewards/accuracies": 0.34200000762939453,
1265
+ "eval_rewards/chosen": -0.01547484565526247,
1266
+ "eval_rewards/margins": 0.006345819681882858,
1267
+ "eval_rewards/rejected": -0.021820668131113052,
1268
+ "eval_runtime": 272.3597,
1269
+ "eval_samples_per_second": 7.343,
1270
+ "eval_steps_per_second": 0.459,
1271
+ "step": 800
1272
+ },
1273
+ {
1274
+ "epoch": 1.7,
1275
+ "learning_rate": 3.3952790595787986e-07,
1276
+ "logits/chosen": 0.8058096170425415,
1277
+ "logits/rejected": 0.8189395666122437,
1278
+ "logps/chosen": -116.7092514038086,
1279
+ "logps/rejected": -103.36915588378906,
1280
+ "loss": 0.0093,
1281
+ "rewards/accuracies": 0.3812499940395355,
1282
+ "rewards/chosen": -0.016209406778216362,
1283
+ "rewards/margins": 0.006806619465351105,
1284
+ "rewards/rejected": -0.023016026243567467,
1285
+ "step": 810
1286
+ },
1287
+ {
1288
+ "epoch": 1.72,
1289
+ "learning_rate": 2.9492720416985004e-07,
1290
+ "logits/chosen": 0.6810709238052368,
1291
+ "logits/rejected": 0.6950341463088989,
1292
+ "logps/chosen": -95.54825592041016,
1293
+ "logps/rejected": -99.33860778808594,
1294
+ "loss": 0.0093,
1295
+ "rewards/accuracies": 0.34375,
1296
+ "rewards/chosen": -0.014285333454608917,
1297
+ "rewards/margins": 0.007627467624843121,
1298
+ "rewards/rejected": -0.021912802010774612,
1299
+ "step": 820
1300
+ },
1301
+ {
1302
+ "epoch": 1.74,
1303
+ "learning_rate": 2.5328246937043526e-07,
1304
+ "logits/chosen": 0.756773829460144,
1305
+ "logits/rejected": 0.7951828837394714,
1306
+ "logps/chosen": -96.31170654296875,
1307
+ "logps/rejected": -100.44956970214844,
1308
+ "loss": 0.0089,
1309
+ "rewards/accuracies": 0.39375001192092896,
1310
+ "rewards/chosen": -0.016868023201823235,
1311
+ "rewards/margins": 0.008540956303477287,
1312
+ "rewards/rejected": -0.025408979505300522,
1313
+ "step": 830
1314
+ },
1315
+ {
1316
+ "epoch": 1.76,
1317
+ "learning_rate": 2.1464952759020857e-07,
1318
+ "logits/chosen": 0.6711292266845703,
1319
+ "logits/rejected": 0.7201661467552185,
1320
+ "logps/chosen": -95.08512878417969,
1321
+ "logps/rejected": -97.06068420410156,
1322
+ "loss": 0.0094,
1323
+ "rewards/accuracies": 0.3062500059604645,
1324
+ "rewards/chosen": -0.014699302613735199,
1325
+ "rewards/margins": 0.005744755733758211,
1326
+ "rewards/rejected": -0.020444059744477272,
1327
+ "step": 840
1328
+ },
1329
+ {
1330
+ "epoch": 1.78,
1331
+ "learning_rate": 1.790801674598186e-07,
1332
+ "logits/chosen": 0.7383053302764893,
1333
+ "logits/rejected": 0.7714890837669373,
1334
+ "logps/chosen": -124.93879699707031,
1335
+ "logps/rejected": -99.6561050415039,
1336
+ "loss": 0.0091,
1337
+ "rewards/accuracies": 0.38749998807907104,
1338
+ "rewards/chosen": -0.015930239111185074,
1339
+ "rewards/margins": 0.009742177091538906,
1340
+ "rewards/rejected": -0.025672415271401405,
1341
+ "step": 850
1342
+ },
1343
+ {
1344
+ "epoch": 1.8,
1345
+ "learning_rate": 1.4662207078575685e-07,
1346
+ "logits/chosen": 0.7659719586372375,
1347
+ "logits/rejected": 0.7763293385505676,
1348
+ "logps/chosen": -90.79667663574219,
1349
+ "logps/rejected": -88.97042083740234,
1350
+ "loss": 0.0089,
1351
+ "rewards/accuracies": 0.3687500059604645,
1352
+ "rewards/chosen": -0.0146828293800354,
1353
+ "rewards/margins": 0.010549478232860565,
1354
+ "rewards/rejected": -0.025232309475541115,
1355
+ "step": 860
1356
+ },
1357
+ {
1358
+ "epoch": 1.82,
1359
+ "learning_rate": 1.1731874863145143e-07,
1360
+ "logits/chosen": 0.7607251405715942,
1361
+ "logits/rejected": 0.7975329160690308,
1362
+ "logps/chosen": -93.11439514160156,
1363
+ "logps/rejected": -90.27216339111328,
1364
+ "loss": 0.0092,
1365
+ "rewards/accuracies": 0.3812499940395355,
1366
+ "rewards/chosen": -0.01333933137357235,
1367
+ "rewards/margins": 0.008563781157135963,
1368
+ "rewards/rejected": -0.021903112530708313,
1369
+ "step": 870
1370
+ },
1371
+ {
1372
+ "epoch": 1.84,
1373
+ "learning_rate": 9.120948298936422e-08,
1374
+ "logits/chosen": 0.7698312997817993,
1375
+ "logits/rejected": 0.8300016522407532,
1376
+ "logps/chosen": -88.53062438964844,
1377
+ "logps/rejected": -85.57463073730469,
1378
+ "loss": 0.0088,
1379
+ "rewards/accuracies": 0.3499999940395355,
1380
+ "rewards/chosen": -0.012708066031336784,
1381
+ "rewards/margins": 0.008814197964966297,
1382
+ "rewards/rejected": -0.021522263064980507,
1383
+ "step": 880
1384
+ },
1385
+ {
1386
+ "epoch": 1.86,
1387
+ "learning_rate": 6.832927412229017e-08,
1388
+ "logits/chosen": 0.7292782068252563,
1389
+ "logits/rejected": 0.7688521146774292,
1390
+ "logps/chosen": -91.70903015136719,
1391
+ "logps/rejected": -94.77793884277344,
1392
+ "loss": 0.0089,
1393
+ "rewards/accuracies": 0.375,
1394
+ "rewards/chosen": -0.014614684507250786,
1395
+ "rewards/margins": 0.009316334500908852,
1396
+ "rewards/rejected": -0.023931019008159637,
1397
+ "step": 890
1398
+ },
1399
+ {
1400
+ "epoch": 1.88,
1401
+ "learning_rate": 4.870879364444109e-08,
1402
+ "logits/chosen": 0.7349046468734741,
1403
+ "logits/rejected": 0.7809796333312988,
1404
+ "logps/chosen": -88.32861328125,
1405
+ "logps/rejected": -87.64916229248047,
1406
+ "loss": 0.0091,
1407
+ "rewards/accuracies": 0.3375000059604645,
1408
+ "rewards/chosen": -0.014183936640620232,
1409
+ "rewards/margins": 0.008784374222159386,
1410
+ "rewards/rejected": -0.022968310862779617,
1411
+ "step": 900
1412
+ },
1413
+ {
1414
+ "epoch": 1.88,
1415
+ "eval_logits/chosen": 0.7319999933242798,
1416
+ "eval_logits/rejected": 0.75148606300354,
1417
+ "eval_logps/chosen": -93.28408813476562,
1418
+ "eval_logps/rejected": -96.38092041015625,
1419
+ "eval_loss": 0.009287585504353046,
1420
+ "eval_rewards/accuracies": 0.3479999899864197,
1421
+ "eval_rewards/chosen": -0.015567170456051826,
1422
+ "eval_rewards/margins": 0.00627197464928031,
1423
+ "eval_rewards/rejected": -0.021839145570993423,
1424
+ "eval_runtime": 272.272,
1425
+ "eval_samples_per_second": 7.346,
1426
+ "eval_steps_per_second": 0.459,
1427
+ "step": 900
1428
+ },
1429
+ {
1430
+ "epoch": 1.9,
1431
+ "learning_rate": 3.237434340521789e-08,
1432
+ "logits/chosen": 0.7097499966621399,
1433
+ "logits/rejected": 0.781818687915802,
1434
+ "logps/chosen": -101.80133056640625,
1435
+ "logps/rejected": -103.1551742553711,
1436
+ "loss": 0.009,
1437
+ "rewards/accuracies": 0.375,
1438
+ "rewards/chosen": -0.015344848856329918,
1439
+ "rewards/margins": 0.008192854933440685,
1440
+ "rewards/rejected": -0.023537704721093178,
1441
+ "step": 910
1442
+ },
1443
+ {
1444
+ "epoch": 1.93,
1445
+ "learning_rate": 1.93478202307823e-08,
1446
+ "logits/chosen": 0.7037514448165894,
1447
+ "logits/rejected": 0.7321338653564453,
1448
+ "logps/chosen": -97.42137145996094,
1449
+ "logps/rejected": -94.04901123046875,
1450
+ "loss": 0.0093,
1451
+ "rewards/accuracies": 0.32499998807907104,
1452
+ "rewards/chosen": -0.013967466540634632,
1453
+ "rewards/margins": 0.005121930036693811,
1454
+ "rewards/rejected": -0.01908939704298973,
1455
+ "step": 920
1456
+ },
1457
+ {
1458
+ "epoch": 1.95,
1459
+ "learning_rate": 9.646686570697062e-09,
1460
+ "logits/chosen": 0.7186129093170166,
1461
+ "logits/rejected": 0.8163198232650757,
1462
+ "logps/chosen": -103.3648681640625,
1463
+ "logps/rejected": -99.19805145263672,
1464
+ "loss": 0.0092,
1465
+ "rewards/accuracies": 0.35624998807907104,
1466
+ "rewards/chosen": -0.012637853622436523,
1467
+ "rewards/margins": 0.008680110797286034,
1468
+ "rewards/rejected": -0.021317964419722557,
1469
+ "step": 930
1470
+ },
1471
+ {
1472
+ "epoch": 1.97,
1473
+ "learning_rate": 3.283947088983663e-09,
1474
+ "logits/chosen": 0.7728757858276367,
1475
+ "logits/rejected": 0.7769932150840759,
1476
+ "logps/chosen": -98.65019989013672,
1477
+ "logps/rejected": -108.49007415771484,
1478
+ "loss": 0.0092,
1479
+ "rewards/accuracies": 0.4000000059604645,
1480
+ "rewards/chosen": -0.018378112465143204,
1481
+ "rewards/margins": 0.008477538824081421,
1482
+ "rewards/rejected": -0.026855653151869774,
1483
+ "step": 940
1484
+ },
1485
+ {
1486
+ "epoch": 1.99,
1487
+ "learning_rate": 2.681312309735229e-10,
1488
+ "logits/chosen": 0.8041951060295105,
1489
+ "logits/rejected": 0.818127453327179,
1490
+ "logps/chosen": -92.90714263916016,
1491
+ "logps/rejected": -102.76998138427734,
1492
+ "loss": 0.0093,
1493
+ "rewards/accuracies": 0.32499998807907104,
1494
+ "rewards/chosen": -0.014978660270571709,
1495
+ "rewards/margins": 0.006398401223123074,
1496
+ "rewards/rejected": -0.021377062425017357,
1497
+ "step": 950
1498
+ },
1499
+ {
1500
+ "epoch": 2.0,
1501
+ "step": 954,
1502
  "total_flos": 0.0,
1503
+ "train_loss": 0.008370190804172112,
1504
+ "train_runtime": 12459.0229,
1505
+ "train_samples_per_second": 4.907,
1506
+ "train_steps_per_second": 0.077
1507
  }
1508
  ],
1509
  "logging_steps": 10,
1510
+ "max_steps": 954,
1511
  "num_input_tokens_seen": 0,
1512
  "num_train_epochs": 2,
1513
  "save_steps": 100,