RikkiXu committed on
Commit fed6648
1 Parent(s): 605614f

Model save

README.md CHANGED
@@ -1,5 +1,4 @@
  ---
- base_model: princeton-nlp/Mistral-7B-Base-SFT-SimPO
  tags:
  - trl
  - dpo
@@ -14,7 +13,7 @@ should probably proofread and complete it, then remove this comment. -->

  # zephyr-7b-dpo-full

- This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-SimPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-SimPO) on the None dataset.
+ This model was trained from scratch on the None dataset.

  ## Model description

@@ -33,7 +32,7 @@ More information needed
  ### Training hyperparameters

  The following hyperparameters were used during training:
- - learning_rate: 1e-07
+ - learning_rate: 1e-08
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
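For context, the hyperparameters in the card map one-to-one onto `transformers.TrainingArguments` fields that TRL's `DPOTrainer` consumes. The sketch below is a minimal, hypothetical reconstruction of the new configuration, not the author's script; the output directory and the per-device reading of the batch sizes are assumptions.

```python
# Hypothetical sketch of the updated run configuration (not taken from this
# repo's training script). Values mirror the new model card; anything marked
# "assumption" is not stated in the commit.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="zephyr-7b-dpo-full",   # assumption
    num_train_epochs=1,
    learning_rate=1e-08,               # lowered from 1e-07 in this commit
    per_device_train_batch_size=4,     # "train_batch_size: 4"
    per_device_eval_batch_size=4,      # "eval_batch_size: 4"
    seed=42,
    logging_steps=10,                  # matches trainer_state.json
    save_steps=100,                    # matches trainer_state.json
)
# These arguments would then be handed to trl.DPOTrainer along with the policy
# model, a frozen reference model, and a preference dataset.
```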
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.5564739256078942,
- "train_runtime": 6859.6403,
- "train_samples": 56236,
- "train_samples_per_second": 8.198,
- "train_steps_per_second": 0.064
+ "train_loss": 0.6880149315033469,
+ "train_runtime": 4603.368,
+ "train_samples": 38288,
+ "train_samples_per_second": 8.317,
+ "train_steps_per_second": 0.065
  }
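The summary metrics above are internally consistent: the throughput fields are just sample and step counts divided by the wall-clock runtime. A quick check using only numbers from this commit (the step count comes from trainer_state.json below):

```python
# Recompute the reported throughput from the new all_results.json values.
train_runtime = 4603.368   # seconds
train_samples = 38288
global_steps = 299         # "global_step" / "max_steps" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 8.317 -> train_samples_per_second
print(round(global_steps / train_runtime, 3))   # 0.065 -> train_steps_per_second
```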
config.json CHANGED
@@ -20,7 +20,7 @@
  "sliding_window": 4096,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
- "transformers_version": "4.41.1",
+ "transformers_version": "4.39.3",
  "use_cache": false,
  "vocab_size": 32000
  }
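The only config change is the `transformers_version` stamp, which records the library version that serialized the file (here a move back to 4.39.3); it does not affect how the model loads. If you want to surface such mismatches explicitly, something like the following works; the repo id is a placeholder, not taken from the commit:

```python
# Compare the version stamped into config.json with the locally installed
# transformers. "RikkiXu/zephyr-7b-dpo-full" is a placeholder repo id.
import transformers
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("RikkiXu/zephyr-7b-dpo-full")
stamped = getattr(cfg, "transformers_version", None)
if stamped != transformers.__version__:
    print(f"config.json written with transformers {stamped}, "
          f"running {transformers.__version__}")
```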
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e692100307a70f65bd0d21036b34ef68687d64f8dc88cc7bd88cac3cb499b72
+ oid sha256:25a506977023960c048f2933f9aec9fc01ac4cef4294a89c59fb94311d62d0ba
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:22c11cd667919a180be90a638f8180dfe57b3af895c1fe13ee1bda06824ae1db
+ oid sha256:f36b418c6f0204d63a482c4d40cf9a099d1c71913006174de004f3468b2600ac
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aee128bd109134555c87391e5c9defecae53a6eb4d7c164396cfda3229777b91
+ oid sha256:0098c8b3746d7ba8d54ad9a79ed49c5d66268bbf89e84d00df21498d52d9cb42
  size 4540516344
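The three `model-*.safetensors` entries are Git LFS pointers, so only the checksum (`oid sha256:`) changes while the shard layout and byte sizes stay identical. A small sketch for checking a locally downloaded shard against its pointer; the local path is an assumption:

```python
# Verify a downloaded shard against the sha256 recorded in its LFS pointer.
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# New oid of model-00001-of-00003.safetensors from this commit:
expected = "25a506977023960c048f2933f9aec9fc01ac4cef4294a89c59fb94311d62d0ba"
assert file_sha256("model-00001-of-00003.safetensors") == expected
```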
runs/Jul03_20-00-02_n136-129-074/events.out.tfevents.1720008022.n136-129-074.3083115.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8002adcf5dd4a58d504c1dd4b67ec2f85903dae943432f38436700f0f8a91980
- size 19297
+ oid sha256:5c476ad368f70bb551a6766cd2d3c0fa0a42b8470e8b61a505a8c02a979fe20e
+ size 25843
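The file under `runs/` is the TensorBoard event log for the new run (larger simply because it carries this run's scalars). If you need the curves outside TensorBoard, the `tensorboard` package can read it back; the scalar tag name is an assumption based on how the HF `Trainer` usually prefixes its logs:

```python
# Read logged scalars back out of the tfevents directory.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jul03_20-00-02_n136-129-074")  # the run directory
acc.Reload()
print(acc.Tags()["scalars"])              # list the available scalar tags
for event in acc.Scalars("train/loss"):   # tag name is an assumption
    print(event.step, event.value)
```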
tokenizer.json CHANGED
@@ -134,7 +134,6 @@
  "end_of_word_suffix": null,
  "fuse_unk": true,
  "byte_fallback": true,
- "ignore_merges": false,
  "vocab": {
  "<unk>": 0,
  "<s>": 1,
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 1.0,
- "train_loss": 0.5564739256078942,
- "train_runtime": 6859.6403,
- "train_samples": 56236,
- "train_samples_per_second": 8.198,
- "train_steps_per_second": 0.064
+ "train_loss": 0.6880149315033469,
+ "train_runtime": 4603.368,
+ "train_samples": 38288,
+ "train_samples_per_second": 8.317,
+ "train_steps_per_second": 0.065
  }
trainer_state.json CHANGED
@@ -1,22 +1,22 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9988623435722411,
5
  "eval_steps": 10000000,
6
- "global_step": 439,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 113.4577168560686,
14
- "learning_rate": 2.2727272727272727e-09,
15
- "logits/chosen": -1.6768856048583984,
16
- "logits/rejected": -1.7259055376052856,
17
- "logps/chosen": -1.2793102264404297,
18
- "logps/rejected": -1.2162058353424072,
19
- "loss": 0.6934,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -24,662 +24,452 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.02,
28
- "grad_norm": 129.24988403709358,
29
- "learning_rate": 2.2727272727272725e-08,
30
- "logits/chosen": -1.7028687000274658,
31
- "logits/rejected": -1.6680525541305542,
32
- "logps/chosen": -1.2131904363632202,
33
- "logps/rejected": -1.2204842567443848,
34
  "loss": 0.6933,
35
- "rewards/accuracies": 0.4652777910232544,
36
- "rewards/chosen": 0.0011519736144691706,
37
- "rewards/margins": -0.001473029493354261,
38
- "rewards/rejected": 0.0026250029914081097,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05,
43
- "grad_norm": 128.90531745075424,
44
- "learning_rate": 4.545454545454545e-08,
45
- "logits/chosen": -1.7795600891113281,
46
- "logits/rejected": -1.7348560094833374,
47
- "logps/chosen": -1.1446261405944824,
48
- "logps/rejected": -1.1849511861801147,
49
- "loss": 0.6903,
50
- "rewards/accuracies": 0.518750011920929,
51
- "rewards/chosen": -0.01403624564409256,
52
- "rewards/margins": 0.0008796676993370056,
53
- "rewards/rejected": -0.014915913343429565,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.07,
58
- "grad_norm": 130.88686983721493,
59
- "learning_rate": 6.818181818181817e-08,
60
- "logits/chosen": -1.7440669536590576,
61
- "logits/rejected": -1.6752160787582397,
62
- "logps/chosen": -1.194157600402832,
63
- "logps/rejected": -1.246543526649475,
64
- "loss": 0.6755,
65
- "rewards/accuracies": 0.637499988079071,
66
- "rewards/chosen": -0.11256156116724014,
67
- "rewards/margins": 0.05156043916940689,
68
- "rewards/rejected": -0.16412200033664703,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.09,
73
- "grad_norm": 99.69346579706567,
74
- "learning_rate": 9.09090909090909e-08,
75
- "logits/chosen": -1.7316129207611084,
76
- "logits/rejected": -1.6658855676651,
77
- "logps/chosen": -1.2354015111923218,
78
- "logps/rejected": -1.3086662292480469,
79
- "loss": 0.6598,
80
- "rewards/accuracies": 0.643750011920929,
81
- "rewards/chosen": -0.28453049063682556,
82
- "rewards/margins": 0.20941098034381866,
83
- "rewards/rejected": -0.49394145607948303,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.11,
88
- "grad_norm": 94.76247831424028,
89
- "learning_rate": 9.994307990108962e-08,
90
- "logits/chosen": -1.6965711116790771,
91
- "logits/rejected": -1.6329838037490845,
92
- "logps/chosen": -1.254882574081421,
93
- "logps/rejected": -1.3045636415481567,
94
- "loss": 0.6257,
95
- "rewards/accuracies": 0.675000011920929,
96
- "rewards/chosen": -0.4679547846317291,
97
- "rewards/margins": 0.29588648676872253,
98
- "rewards/rejected": -0.7638412714004517,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.14,
103
- "grad_norm": 134.02849477110337,
104
- "learning_rate": 9.959570405988094e-08,
105
- "logits/chosen": -1.7339792251586914,
106
- "logits/rejected": -1.6545541286468506,
107
- "logps/chosen": -1.1747747659683228,
108
- "logps/rejected": -1.2515872716903687,
109
- "loss": 0.628,
110
- "rewards/accuracies": 0.675000011920929,
111
- "rewards/chosen": -0.6567646265029907,
112
- "rewards/margins": 0.2488430291414261,
113
- "rewards/rejected": -0.9056077003479004,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.16,
118
- "grad_norm": 81.9825685637945,
119
- "learning_rate": 9.893476820924666e-08,
120
- "logits/chosen": -1.8203418254852295,
121
- "logits/rejected": -1.7378566265106201,
122
- "logps/chosen": -1.2860430479049683,
123
- "logps/rejected": -1.3670638799667358,
124
- "loss": 0.5884,
125
- "rewards/accuracies": 0.699999988079071,
126
- "rewards/chosen": -0.732010006904602,
127
- "rewards/margins": 0.3435601592063904,
128
- "rewards/rejected": -1.0755703449249268,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.18,
133
- "grad_norm": 111.43014849080478,
134
- "learning_rate": 9.796445099843647e-08,
135
- "logits/chosen": -1.8093068599700928,
136
- "logits/rejected": -1.7265288829803467,
137
- "logps/chosen": -1.2918837070465088,
138
- "logps/rejected": -1.3895039558410645,
139
- "loss": 0.6282,
140
- "rewards/accuracies": 0.6625000238418579,
141
- "rewards/chosen": -0.8491527438163757,
142
- "rewards/margins": 0.3709031641483307,
143
- "rewards/rejected": -1.2200558185577393,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.2,
148
- "grad_norm": 99.11416934571663,
149
- "learning_rate": 9.669088708527066e-08,
150
- "logits/chosen": -1.756994605064392,
151
- "logits/rejected": -1.6932170391082764,
152
- "logps/chosen": -1.3391311168670654,
153
- "logps/rejected": -1.3889451026916504,
154
- "loss": 0.5898,
155
- "rewards/accuracies": 0.675000011920929,
156
- "rewards/chosen": -1.0343393087387085,
157
- "rewards/margins": 0.4276931881904602,
158
- "rewards/rejected": -1.462032437324524,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.23,
163
- "grad_norm": 108.94100525225643,
164
- "learning_rate": 9.512212835085849e-08,
165
- "logits/chosen": -1.7921009063720703,
166
- "logits/rejected": -1.7081331014633179,
167
- "logps/chosen": -1.2848669290542603,
168
- "logps/rejected": -1.379817247390747,
169
- "loss": 0.5831,
170
- "rewards/accuracies": 0.706250011920929,
171
- "rewards/chosen": -1.1641167402267456,
172
- "rewards/margins": 0.5224048495292664,
173
- "rewards/rejected": -1.6865215301513672,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.25,
178
- "grad_norm": 94.58328257186776,
179
- "learning_rate": 9.326809299301306e-08,
180
- "logits/chosen": -1.7934032678604126,
181
- "logits/rejected": -1.692413568496704,
182
- "logps/chosen": -1.3193799257278442,
183
- "logps/rejected": -1.4346280097961426,
184
- "loss": 0.5776,
185
- "rewards/accuracies": 0.7250000238418579,
186
- "rewards/chosen": -1.1873786449432373,
187
- "rewards/margins": 0.6364501714706421,
188
- "rewards/rejected": -1.8238286972045898,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.27,
193
- "grad_norm": 123.65835816529943,
194
- "learning_rate": 9.114050282021158e-08,
195
- "logits/chosen": -1.7867753505706787,
196
- "logits/rejected": -1.7270488739013672,
197
- "logps/chosen": -1.2704253196716309,
198
- "logps/rejected": -1.373067855834961,
199
- "loss": 0.5626,
200
- "rewards/accuracies": 0.7562500238418579,
201
- "rewards/chosen": -1.228627324104309,
202
- "rewards/margins": 0.5525014400482178,
203
- "rewards/rejected": -1.7811288833618164,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.3,
208
- "grad_norm": 95.55607076940454,
209
- "learning_rate": 8.875280914254802e-08,
210
- "logits/chosen": -1.7855722904205322,
211
- "logits/rejected": -1.6952784061431885,
212
- "logps/chosen": -1.3185499906539917,
213
- "logps/rejected": -1.4162604808807373,
214
- "loss": 0.5666,
215
- "rewards/accuracies": 0.7250000238418579,
216
- "rewards/chosen": -1.2571992874145508,
217
- "rewards/margins": 0.6928548812866211,
218
- "rewards/rejected": -1.9500541687011719,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.32,
223
- "grad_norm": 78.06311583858023,
224
- "learning_rate": 8.612010772821971e-08,
225
- "logits/chosen": -1.8129308223724365,
226
- "logits/rejected": -1.7682344913482666,
227
- "logps/chosen": -1.3229562044143677,
228
- "logps/rejected": -1.361987829208374,
229
- "loss": 0.5656,
230
- "rewards/accuracies": 0.762499988079071,
231
- "rewards/chosen": -1.0907716751098633,
232
- "rewards/margins": 0.6394159197807312,
233
- "rewards/rejected": -1.7301876544952393,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.34,
238
- "grad_norm": 78.88051547861711,
239
- "learning_rate": 8.325904336322055e-08,
240
- "logits/chosen": -1.7850853204727173,
241
- "logits/rejected": -1.7291721105575562,
242
- "logps/chosen": -1.2866876125335693,
243
- "logps/rejected": -1.402489423751831,
244
- "loss": 0.5657,
245
- "rewards/accuracies": 0.7250000238418579,
246
- "rewards/chosen": -1.1932275295257568,
247
- "rewards/margins": 0.6862825155258179,
248
- "rewards/rejected": -1.8795099258422852,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.36,
253
- "grad_norm": 80.61975465340338,
254
- "learning_rate": 8.01877046176447e-08,
255
- "logits/chosen": -1.7330862283706665,
256
- "logits/rejected": -1.6600639820098877,
257
- "logps/chosen": -1.312774419784546,
258
- "logps/rejected": -1.419638991355896,
259
- "loss": 0.566,
260
- "rewards/accuracies": 0.71875,
261
- "rewards/chosen": -1.3447433710098267,
262
- "rewards/margins": 0.5547876358032227,
263
- "rewards/rejected": -1.8995310068130493,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.39,
268
- "grad_norm": 78.96860347790852,
269
- "learning_rate": 7.692550948392249e-08,
270
- "logits/chosen": -1.7879507541656494,
271
- "logits/rejected": -1.7219922542572021,
272
- "logps/chosen": -1.3235785961151123,
273
- "logps/rejected": -1.3983043432235718,
274
- "loss": 0.5589,
275
- "rewards/accuracies": 0.7250000238418579,
276
- "rewards/chosen": -1.1332305669784546,
277
- "rewards/margins": 0.6428099870681763,
278
- "rewards/rejected": -1.7760404348373413,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.41,
283
- "grad_norm": 91.20361542856642,
284
- "learning_rate": 7.349308261002021e-08,
285
- "logits/chosen": -1.744236707687378,
286
- "logits/rejected": -1.685158133506775,
287
- "logps/chosen": -1.3169732093811035,
288
- "logps/rejected": -1.422654390335083,
289
- "loss": 0.5541,
290
- "rewards/accuracies": 0.731249988079071,
291
- "rewards/chosen": -1.1988707780838013,
292
- "rewards/margins": 0.5603083372116089,
293
- "rewards/rejected": -1.7591791152954102,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.43,
298
- "grad_norm": 85.52532511636508,
299
- "learning_rate": 6.991212490377531e-08,
300
- "logits/chosen": -1.7989540100097656,
301
- "logits/rejected": -1.7449232339859009,
302
- "logps/chosen": -1.341355562210083,
303
- "logps/rejected": -1.4322634935379028,
304
- "loss": 0.528,
305
- "rewards/accuracies": 0.768750011920929,
306
- "rewards/chosen": -1.112524390220642,
307
- "rewards/margins": 0.6936608552932739,
308
- "rewards/rejected": -1.8061851263046265,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.46,
313
- "grad_norm": 103.62294205062797,
314
- "learning_rate": 6.620527633276978e-08,
315
- "logits/chosen": -1.7379404306411743,
316
- "logits/rejected": -1.6623157262802124,
317
- "logps/chosen": -1.307138204574585,
318
- "logps/rejected": -1.4737988710403442,
319
- "loss": 0.54,
320
- "rewards/accuracies": 0.768750011920929,
321
- "rewards/chosen": -1.126852035522461,
322
- "rewards/margins": 0.7987160682678223,
323
- "rewards/rejected": -1.9255679845809937,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.48,
328
- "grad_norm": 97.12306479641116,
329
- "learning_rate": 6.239597278716581e-08,
330
- "logits/chosen": -1.8088064193725586,
331
- "logits/rejected": -1.74566650390625,
332
- "logps/chosen": -1.3601690530776978,
333
- "logps/rejected": -1.421120285987854,
334
- "loss": 0.5357,
335
- "rewards/accuracies": 0.7562500238418579,
336
- "rewards/chosen": -1.1483900547027588,
337
- "rewards/margins": 0.8161466717720032,
338
- "rewards/rejected": -1.9645369052886963,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.5,
343
- "grad_norm": 73.94190937238754,
344
- "learning_rate": 5.8508297910462456e-08,
345
- "logits/chosen": -1.75725519657135,
346
- "logits/rejected": -1.6749064922332764,
347
- "logps/chosen": -1.2782678604125977,
348
- "logps/rejected": -1.4215964078903198,
349
- "loss": 0.5266,
350
- "rewards/accuracies": 0.7875000238418579,
351
- "rewards/chosen": -1.147524356842041,
352
- "rewards/margins": 0.8555541038513184,
353
- "rewards/rejected": -2.0030786991119385,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.52,
358
- "grad_norm": 102.01553665817238,
359
- "learning_rate": 5.456683083494731e-08,
360
- "logits/chosen": -1.7501509189605713,
361
- "logits/rejected": -1.7102434635162354,
362
- "logps/chosen": -1.2995601892471313,
363
- "logps/rejected": -1.3736200332641602,
364
- "loss": 0.5541,
365
- "rewards/accuracies": 0.699999988079071,
366
- "rewards/chosen": -1.057513952255249,
367
- "rewards/margins": 0.5031803846359253,
368
- "rewards/rejected": -1.5606944561004639,
369
  "step": 230
370
  },
371
  {
372
- "epoch": 0.55,
373
- "grad_norm": 111.25877684367076,
374
- "learning_rate": 5.059649078450834e-08,
375
- "logits/chosen": -1.7446762323379517,
376
- "logits/rejected": -1.6967140436172485,
377
- "logps/chosen": -1.2858344316482544,
378
- "logps/rejected": -1.407447338104248,
379
- "loss": 0.5275,
380
- "rewards/accuracies": 0.731249988079071,
381
- "rewards/chosen": -1.174809217453003,
382
- "rewards/margins": 0.595741868019104,
383
- "rewards/rejected": -1.770551085472107,
384
  "step": 240
385
  },
386
  {
387
- "epoch": 0.57,
388
- "grad_norm": 89.74032522739078,
389
- "learning_rate": 4.6622379527277186e-08,
390
- "logits/chosen": -1.7489802837371826,
391
- "logits/rejected": -1.6957324743270874,
392
- "logps/chosen": -1.262298345565796,
393
- "logps/rejected": -1.361208438873291,
394
- "loss": 0.5316,
395
- "rewards/accuracies": 0.6812499761581421,
396
- "rewards/chosen": -1.2340971231460571,
397
- "rewards/margins": 0.6583037376403809,
398
- "rewards/rejected": -1.8924009799957275,
399
  "step": 250
400
  },
401
  {
402
- "epoch": 0.59,
403
- "grad_norm": 75.51753806333156,
404
- "learning_rate": 4.26696226741691e-08,
405
- "logits/chosen": -1.7651500701904297,
406
- "logits/rejected": -1.6915719509124756,
407
- "logps/chosen": -1.3747735023498535,
408
- "logps/rejected": -1.4673488140106201,
409
- "loss": 0.5304,
410
- "rewards/accuracies": 0.6875,
411
- "rewards/chosen": -1.4194996356964111,
412
- "rewards/margins": 0.7260831594467163,
413
- "rewards/rejected": -2.145582914352417,
414
  "step": 260
415
  },
416
  {
417
- "epoch": 0.61,
418
- "grad_norm": 81.81714896589709,
419
- "learning_rate": 3.876321082668098e-08,
420
- "logits/chosen": -1.8280465602874756,
421
- "logits/rejected": -1.7529118061065674,
422
- "logps/chosen": -1.3470970392227173,
423
- "logps/rejected": -1.435733437538147,
424
- "loss": 0.5229,
425
- "rewards/accuracies": 0.7749999761581421,
426
- "rewards/chosen": -1.0860626697540283,
427
- "rewards/margins": 0.7932750582695007,
428
- "rewards/rejected": -1.8793376684188843,
429
  "step": 270
430
  },
431
  {
432
- "epoch": 0.64,
433
- "grad_norm": 88.74965472733419,
434
- "learning_rate": 3.492784157826244e-08,
435
- "logits/chosen": -1.7529668807983398,
436
- "logits/rejected": -1.6530554294586182,
437
- "logps/chosen": -1.3322269916534424,
438
- "logps/rejected": -1.4489867687225342,
439
- "loss": 0.5263,
440
- "rewards/accuracies": 0.731249988079071,
441
- "rewards/chosen": -1.0935055017471313,
442
- "rewards/margins": 0.8137003779411316,
443
- "rewards/rejected": -1.9072058200836182,
444
  "step": 280
445
  },
446
  {
447
- "epoch": 0.66,
448
- "grad_norm": 76.0414974261225,
449
- "learning_rate": 3.118776336817812e-08,
450
- "logits/chosen": -1.7995818853378296,
451
- "logits/rejected": -1.7317774295806885,
452
- "logps/chosen": -1.3204050064086914,
453
- "logps/rejected": -1.4101346731185913,
454
- "loss": 0.5231,
455
- "rewards/accuracies": 0.731249988079071,
456
- "rewards/chosen": -1.1715632677078247,
457
- "rewards/margins": 0.8251334428787231,
458
- "rewards/rejected": -1.9966968297958374,
459
  "step": 290
460
  },
461
- {
462
- "epoch": 0.68,
463
- "grad_norm": 87.86932907500237,
464
- "learning_rate": 2.7566622175067443e-08,
465
- "logits/chosen": -1.7801201343536377,
466
- "logits/rejected": -1.7111116647720337,
467
- "logps/chosen": -1.3444006443023682,
468
- "logps/rejected": -1.494860053062439,
469
- "loss": 0.5308,
470
- "rewards/accuracies": 0.7124999761581421,
471
- "rewards/chosen": -1.3581712245941162,
472
- "rewards/margins": 0.7247291803359985,
473
- "rewards/rejected": -2.082900285720825,
474
- "step": 300
475
- },
476
- {
477
- "epoch": 0.71,
478
- "grad_norm": 101.39189225311264,
479
- "learning_rate": 2.408731201945432e-08,
480
- "logits/chosen": -1.77533757686615,
481
- "logits/rejected": -1.7264738082885742,
482
- "logps/chosen": -1.313521146774292,
483
- "logps/rejected": -1.4012606143951416,
484
- "loss": 0.5179,
485
- "rewards/accuracies": 0.768750011920929,
486
- "rewards/chosen": -1.1962579488754272,
487
- "rewards/margins": 0.6758478283882141,
488
- "rewards/rejected": -1.8721058368682861,
489
- "step": 310
490
- },
491
- {
492
- "epoch": 0.73,
493
- "grad_norm": 97.55308734787654,
494
- "learning_rate": 2.0771830220378112e-08,
495
- "logits/chosen": -1.7309653759002686,
496
- "logits/rejected": -1.673135757446289,
497
- "logps/chosen": -1.3468959331512451,
498
- "logps/rejected": -1.405505895614624,
499
- "loss": 0.5257,
500
- "rewards/accuracies": 0.7437499761581421,
501
- "rewards/chosen": -1.240114450454712,
502
- "rewards/margins": 0.6646026968955994,
503
- "rewards/rejected": -1.9047170877456665,
504
- "step": 320
505
- },
506
- {
507
- "epoch": 0.75,
508
- "grad_norm": 79.56679512217426,
509
- "learning_rate": 1.7641138321260257e-08,
510
- "logits/chosen": -1.7662233114242554,
511
- "logits/rejected": -1.6911147832870483,
512
- "logps/chosen": -1.3011645078659058,
513
- "logps/rejected": -1.418474555015564,
514
- "loss": 0.5175,
515
- "rewards/accuracies": 0.8125,
516
- "rewards/chosen": -1.1730973720550537,
517
- "rewards/margins": 0.8977500796318054,
518
- "rewards/rejected": -2.070847511291504,
519
- "step": 330
520
- },
521
- {
522
- "epoch": 0.77,
523
- "grad_norm": 78.95443991883164,
524
- "learning_rate": 1.4715029564277793e-08,
525
- "logits/chosen": -1.8377494812011719,
526
- "logits/rejected": -1.785035490989685,
527
- "logps/chosen": -1.3052855730056763,
528
- "logps/rejected": -1.4216973781585693,
529
- "loss": 0.535,
530
- "rewards/accuracies": 0.7562500238418579,
531
- "rewards/chosen": -1.0062506198883057,
532
- "rewards/margins": 0.7934825420379639,
533
- "rewards/rejected": -1.7997331619262695,
534
- "step": 340
535
- },
536
- {
537
- "epoch": 0.8,
538
- "grad_norm": 85.7404253023672,
539
- "learning_rate": 1.2012003751113343e-08,
540
- "logits/chosen": -1.8215789794921875,
541
- "logits/rejected": -1.7556202411651611,
542
- "logps/chosen": -1.2944856882095337,
543
- "logps/rejected": -1.427002191543579,
544
- "loss": 0.5037,
545
- "rewards/accuracies": 0.768750011920929,
546
- "rewards/chosen": -1.3231576681137085,
547
- "rewards/margins": 0.8232595324516296,
548
- "rewards/rejected": -2.1464171409606934,
549
- "step": 350
550
- },
551
- {
552
- "epoch": 0.82,
553
- "grad_norm": 81.80824877374742,
554
- "learning_rate": 9.549150281252633e-09,
555
- "logits/chosen": -1.7837785482406616,
556
- "logits/rejected": -1.7289537191390991,
557
- "logps/chosen": -1.2906091213226318,
558
- "logps/rejected": -1.4211328029632568,
559
- "loss": 0.5085,
560
- "rewards/accuracies": 0.731249988079071,
561
- "rewards/chosen": -1.1847398281097412,
562
- "rewards/margins": 0.7879296541213989,
563
- "rewards/rejected": -1.9726696014404297,
564
- "step": 360
565
- },
566
- {
567
- "epoch": 0.84,
568
- "grad_norm": 76.45542191386993,
569
- "learning_rate": 7.3420401072985306e-09,
570
- "logits/chosen": -1.8155953884124756,
571
- "logits/rejected": -1.7616589069366455,
572
- "logps/chosen": -1.3416122198104858,
573
- "logps/rejected": -1.4825923442840576,
574
- "loss": 0.512,
575
- "rewards/accuracies": 0.731249988079071,
576
- "rewards/chosen": -1.2699860334396362,
577
- "rewards/margins": 0.8351644277572632,
578
- "rewards/rejected": -2.1051506996154785,
579
- "step": 370
580
- },
581
- {
582
- "epoch": 0.86,
583
- "grad_norm": 100.61500636796463,
584
- "learning_rate": 5.404627290395369e-09,
585
- "logits/chosen": -1.7809003591537476,
586
- "logits/rejected": -1.7117547988891602,
587
- "logps/chosen": -1.299863338470459,
588
- "logps/rejected": -1.4180821180343628,
589
- "loss": 0.5139,
590
- "rewards/accuracies": 0.762499988079071,
591
- "rewards/chosen": -1.1025749444961548,
592
- "rewards/margins": 0.7822272777557373,
593
- "rewards/rejected": -1.884802222251892,
594
- "step": 380
595
- },
596
- {
597
- "epoch": 0.89,
598
- "grad_norm": 143.00077069598655,
599
- "learning_rate": 3.74916077816162e-09,
600
- "logits/chosen": -1.7901942729949951,
601
- "logits/rejected": -1.7254260778427124,
602
- "logps/chosen": -1.2824052572250366,
603
- "logps/rejected": -1.3659498691558838,
604
- "loss": 0.5278,
605
- "rewards/accuracies": 0.7124999761581421,
606
- "rewards/chosen": -1.326103925704956,
607
- "rewards/margins": 0.6741579174995422,
608
- "rewards/rejected": -2.0002617835998535,
609
- "step": 390
610
- },
611
- {
612
- "epoch": 0.91,
613
- "grad_norm": 86.78461219124101,
614
- "learning_rate": 2.386106962899165e-09,
615
- "logits/chosen": -1.7145483493804932,
616
- "logits/rejected": -1.6393749713897705,
617
- "logps/chosen": -1.3581172227859497,
618
- "logps/rejected": -1.4866178035736084,
619
- "loss": 0.5223,
620
- "rewards/accuracies": 0.7124999761581421,
621
- "rewards/chosen": -1.3169643878936768,
622
- "rewards/margins": 0.7216087579727173,
623
- "rewards/rejected": -2.0385732650756836,
624
- "step": 400
625
- },
626
- {
627
- "epoch": 0.93,
628
- "grad_norm": 97.03178172617403,
629
- "learning_rate": 1.3240835096913706e-09,
630
- "logits/chosen": -1.7489525079727173,
631
- "logits/rejected": -1.6416590213775635,
632
- "logps/chosen": -1.2802146673202515,
633
- "logps/rejected": -1.4399700164794922,
634
- "loss": 0.5334,
635
- "rewards/accuracies": 0.737500011920929,
636
- "rewards/chosen": -1.086639404296875,
637
- "rewards/margins": 0.9034107327461243,
638
- "rewards/rejected": -1.9900500774383545,
639
- "step": 410
640
- },
641
- {
642
- "epoch": 0.96,
643
- "grad_norm": 177.7844461853396,
644
- "learning_rate": 5.698048727497462e-10,
645
- "logits/chosen": -1.7718130350112915,
646
- "logits/rejected": -1.6983397006988525,
647
- "logps/chosen": -1.3178266286849976,
648
- "logps/rejected": -1.4721015691757202,
649
- "loss": 0.5219,
650
- "rewards/accuracies": 0.7875000238418579,
651
- "rewards/chosen": -1.2652876377105713,
652
- "rewards/margins": 0.8731800317764282,
653
- "rewards/rejected": -2.138467788696289,
654
- "step": 420
655
- },
656
- {
657
- "epoch": 0.98,
658
- "grad_norm": 191.4766363479098,
659
- "learning_rate": 1.2803984447259387e-10,
660
- "logits/chosen": -1.7818502187728882,
661
- "logits/rejected": -1.721131682395935,
662
- "logps/chosen": -1.3105113506317139,
663
- "logps/rejected": -1.4450818300247192,
664
- "loss": 0.5113,
665
- "rewards/accuracies": 0.7250000238418579,
666
- "rewards/chosen": -1.2817203998565674,
667
- "rewards/margins": 0.869175910949707,
668
- "rewards/rejected": -2.1508963108062744,
669
- "step": 430
670
- },
671
  {
672
  "epoch": 1.0,
673
- "step": 439,
674
  "total_flos": 0.0,
675
- "train_loss": 0.5564739256078942,
676
- "train_runtime": 6859.6403,
677
- "train_samples_per_second": 8.198,
678
- "train_steps_per_second": 0.064
679
  }
680
  ],
681
  "logging_steps": 10,
682
- "max_steps": 439,
683
  "num_input_tokens_seen": 0,
684
  "num_train_epochs": 1,
685
  "save_steps": 100,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9991645781119465,
5
  "eval_steps": 10000000,
6
+ "global_step": 299,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 94.51455806402136,
14
+ "learning_rate": 3.333333333333333e-10,
15
+ "logits/chosen": -1.693521499633789,
16
+ "logits/rejected": -1.6753541231155396,
17
+ "logps/chosen": -1.041430115699768,
18
+ "logps/rejected": -0.9273841977119446,
19
+ "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.03,
28
+ "grad_norm": 100.10048800952612,
29
+ "learning_rate": 3.3333333333333334e-09,
30
+ "logits/chosen": -1.7426362037658691,
31
+ "logits/rejected": -1.7463488578796387,
32
+ "logps/chosen": -1.0522818565368652,
33
+ "logps/rejected": -1.0174607038497925,
34
  "loss": 0.6933,
35
+ "rewards/accuracies": 0.4722222089767456,
36
+ "rewards/chosen": -0.0006895202095620334,
37
+ "rewards/margins": -0.0014206942869350314,
38
+ "rewards/rejected": 0.0007311741355806589,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.07,
43
+ "grad_norm": 100.2747717498339,
44
+ "learning_rate": 6.666666666666667e-09,
45
+ "logits/chosen": -1.876529335975647,
46
+ "logits/rejected": -1.8286478519439697,
47
+ "logps/chosen": -1.0717421770095825,
48
+ "logps/rejected": -1.0434354543685913,
49
+ "loss": 0.6934,
50
+ "rewards/accuracies": 0.53125,
51
+ "rewards/chosen": 0.0013453494757413864,
52
+ "rewards/margins": 0.001054912805557251,
53
+ "rewards/rejected": 0.00029043667018413544,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.1,
58
+ "grad_norm": 91.78075458726622,
59
+ "learning_rate": 1e-08,
60
+ "logits/chosen": -1.7869594097137451,
61
+ "logits/rejected": -1.7599306106567383,
62
+ "logps/chosen": -1.066955327987671,
63
+ "logps/rejected": -1.0448154211044312,
64
+ "loss": 0.6926,
65
+ "rewards/accuracies": 0.5562499761581421,
66
+ "rewards/chosen": 0.0009901206940412521,
67
+ "rewards/margins": 0.005651786923408508,
68
+ "rewards/rejected": -0.004661666229367256,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.13,
73
+ "grad_norm": 83.1668176080734,
74
+ "learning_rate": 9.96594024562513e-09,
75
+ "logits/chosen": -1.8331626653671265,
76
+ "logits/rejected": -1.8235479593276978,
77
+ "logps/chosen": -1.069947361946106,
78
+ "logps/rejected": -1.0194944143295288,
79
+ "loss": 0.6933,
80
+ "rewards/accuracies": 0.5625,
81
+ "rewards/chosen": 0.00406806543469429,
82
+ "rewards/margins": 0.006294439546763897,
83
+ "rewards/rejected": -0.0022263741120696068,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.17,
88
+ "grad_norm": 97.3484905394127,
89
+ "learning_rate": 9.86422500924775e-09,
90
+ "logits/chosen": -1.8598533868789673,
91
+ "logits/rejected": -1.8249794244766235,
92
+ "logps/chosen": -1.0844773054122925,
93
+ "logps/rejected": -1.0546021461486816,
94
+ "loss": 0.6928,
95
+ "rewards/accuracies": 0.5,
96
+ "rewards/chosen": 0.0010452494025230408,
97
+ "rewards/margins": 0.003718096762895584,
98
+ "rewards/rejected": -0.0026728473603725433,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.2,
103
+ "grad_norm": 89.15270505321199,
104
+ "learning_rate": 9.696240049254743e-09,
105
+ "logits/chosen": -1.8162918090820312,
106
+ "logits/rejected": -1.8095808029174805,
107
+ "logps/chosen": -1.1147956848144531,
108
+ "logps/rejected": -1.10584557056427,
109
+ "loss": 0.692,
110
+ "rewards/accuracies": 0.574999988079071,
111
+ "rewards/chosen": 0.003025440499186516,
112
+ "rewards/margins": 0.006887376308441162,
113
+ "rewards/rejected": -0.0038619358092546463,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.23,
118
+ "grad_norm": 92.66102585547152,
119
+ "learning_rate": 9.464273976236516e-09,
120
+ "logits/chosen": -1.7991975545883179,
121
+ "logits/rejected": -1.7613614797592163,
122
+ "logps/chosen": -1.0598714351654053,
123
+ "logps/rejected": -1.0530080795288086,
124
+ "loss": 0.6927,
125
+ "rewards/accuracies": 0.5562499761581421,
126
+ "rewards/chosen": 0.0042045507580041885,
127
+ "rewards/margins": 0.0019652042537927628,
128
+ "rewards/rejected": 0.002239346969872713,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.27,
133
+ "grad_norm": 194.03821825764928,
134
+ "learning_rate": 9.171487073181198e-09,
135
+ "logits/chosen": -1.8440383672714233,
136
+ "logits/rejected": -1.8225574493408203,
137
+ "logps/chosen": -1.0219953060150146,
138
+ "logps/rejected": -1.0044893026351929,
139
+ "loss": 0.6914,
140
+ "rewards/accuracies": 0.625,
141
+ "rewards/chosen": 0.0038851741701364517,
142
+ "rewards/margins": 0.009898573160171509,
143
+ "rewards/rejected": -0.006013398990035057,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.3,
148
+ "grad_norm": 89.9314461991181,
149
+ "learning_rate": 8.821868240089676e-09,
150
+ "logits/chosen": -1.8029550313949585,
151
+ "logits/rejected": -1.7841434478759766,
152
+ "logps/chosen": -1.045693039894104,
153
+ "logps/rejected": -1.0079419612884521,
154
+ "loss": 0.6912,
155
+ "rewards/accuracies": 0.48124998807907104,
156
+ "rewards/chosen": 0.005337424576282501,
157
+ "rewards/margins": 0.004554492421448231,
158
+ "rewards/rejected": 0.0007829321548342705,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.33,
163
+ "grad_norm": 100.68694545007727,
164
+ "learning_rate": 8.42018064959393e-09,
165
+ "logits/chosen": -1.8531291484832764,
166
+ "logits/rejected": -1.8357194662094116,
167
+ "logps/chosen": -1.078748345375061,
168
+ "logps/rejected": -1.0813863277435303,
169
+ "loss": 0.6903,
170
+ "rewards/accuracies": 0.606249988079071,
171
+ "rewards/chosen": 0.007160305976867676,
172
+ "rewards/margins": 0.007577991578727961,
173
+ "rewards/rejected": -0.0004176851361989975,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.37,
178
+ "grad_norm": 91.99753152902545,
179
+ "learning_rate": 7.971896853961043e-09,
180
+ "logits/chosen": -1.8346866369247437,
181
+ "logits/rejected": -1.8052761554718018,
182
+ "logps/chosen": -1.0582704544067383,
183
+ "logps/rejected": -1.0159928798675537,
184
+ "loss": 0.6901,
185
+ "rewards/accuracies": 0.550000011920929,
186
+ "rewards/chosen": -0.0007529235444962978,
187
+ "rewards/margins": 0.0036198147572577,
188
+ "rewards/rejected": -0.004372738301753998,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.4,
193
+ "grad_norm": 76.03854240063603,
194
+ "learning_rate": 7.48312422757881e-09,
195
+ "logits/chosen": -1.8829187154769897,
196
+ "logits/rejected": -1.8501228094100952,
197
+ "logps/chosen": -1.0292680263519287,
198
+ "logps/rejected": -1.019852876663208,
199
+ "loss": 0.6887,
200
+ "rewards/accuracies": 0.6875,
201
+ "rewards/chosen": 0.00865122489631176,
202
+ "rewards/margins": 0.011260826140642166,
203
+ "rewards/rejected": -0.0026096017099916935,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.43,
208
+ "grad_norm": 91.54881621872774,
209
+ "learning_rate": 6.96052176068713e-09,
210
+ "logits/chosen": -1.7626311779022217,
211
+ "logits/rejected": -1.7299768924713135,
212
+ "logps/chosen": -1.0229580402374268,
213
+ "logps/rejected": -1.0213210582733154,
214
+ "loss": 0.6874,
215
+ "rewards/accuracies": 0.65625,
216
+ "rewards/chosen": 0.010313736274838448,
217
+ "rewards/margins": 0.01158633828163147,
218
+ "rewards/rejected": -0.0012726020067930222,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.47,
223
+ "grad_norm": 88.02230042083127,
224
+ "learning_rate": 6.4112093379492135e-09,
225
+ "logits/chosen": -1.796229600906372,
226
+ "logits/rejected": -1.7824671268463135,
227
+ "logps/chosen": -1.078906774520874,
228
+ "logps/rejected": -1.0356519222259521,
229
+ "loss": 0.6887,
230
+ "rewards/accuracies": 0.65625,
231
+ "rewards/chosen": 0.015195205807685852,
232
+ "rewards/margins": 0.011107077822089195,
233
+ "rewards/rejected": 0.004088127985596657,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.5,
238
+ "grad_norm": 89.70971937959732,
239
+ "learning_rate": 5.842670737842467e-09,
240
+ "logits/chosen": -1.8206145763397217,
241
+ "logits/rejected": -1.788490891456604,
242
+ "logps/chosen": -1.0580933094024658,
243
+ "logps/rejected": -1.0225986242294312,
244
+ "loss": 0.6877,
245
+ "rewards/accuracies": 0.5625,
246
+ "rewards/chosen": 0.004929685965180397,
247
+ "rewards/margins": 0.005628877319395542,
248
+ "rewards/rejected": -0.0006991913542151451,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.53,
253
+ "grad_norm": 81.17857900193553,
254
+ "learning_rate": 5.262651674395798e-09,
255
+ "logits/chosen": -1.8310163021087646,
256
+ "logits/rejected": -1.8322261571884155,
257
+ "logps/chosen": -1.0157067775726318,
258
+ "logps/rejected": -1.0004805326461792,
259
+ "loss": 0.6876,
260
+ "rewards/accuracies": 0.6187499761581421,
261
+ "rewards/chosen": 0.010493971407413483,
262
+ "rewards/margins": 0.013428708538413048,
263
+ "rewards/rejected": -0.0029347380623221397,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.57,
268
+ "grad_norm": 87.11260929204627,
269
+ "learning_rate": 4.679054270342702e-09,
270
+ "logits/chosen": -1.8449046611785889,
271
+ "logits/rejected": -1.7946765422821045,
272
+ "logps/chosen": -1.0548999309539795,
273
+ "logps/rejected": -1.051992654800415,
274
+ "loss": 0.6869,
275
+ "rewards/accuracies": 0.6937500238418579,
276
+ "rewards/chosen": 0.010436683893203735,
277
+ "rewards/margins": 0.010068513453006744,
278
+ "rewards/rejected": 0.00036817044019699097,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.6,
283
+ "grad_norm": 93.41415146032115,
284
+ "learning_rate": 4.099829399377524e-09,
285
+ "logits/chosen": -1.8277971744537354,
286
+ "logits/rejected": -1.7856277227401733,
287
+ "logps/chosen": -1.0608714818954468,
288
+ "logps/rejected": -1.0331629514694214,
289
+ "loss": 0.6855,
290
+ "rewards/accuracies": 0.6000000238418579,
291
+ "rewards/chosen": 0.015542459674179554,
292
+ "rewards/margins": 0.01706361211836338,
293
+ "rewards/rejected": -0.0015211515128612518,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.63,
298
+ "grad_norm": 91.20164752256586,
299
+ "learning_rate": 3.532868364233416e-09,
300
+ "logits/chosen": -1.8144668340682983,
301
+ "logits/rejected": -1.7934455871582031,
302
+ "logps/chosen": -1.0488895177841187,
303
+ "logps/rejected": -1.0484153032302856,
304
+ "loss": 0.6869,
305
+ "rewards/accuracies": 0.643750011920929,
306
+ "rewards/chosen": 0.015009616501629353,
307
+ "rewards/margins": 0.011918185278773308,
308
+ "rewards/rejected": 0.00309143029153347,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.67,
313
+ "grad_norm": 103.72123679470863,
314
+ "learning_rate": 2.985895386349233e-09,
315
+ "logits/chosen": -1.783926248550415,
316
+ "logits/rejected": -1.7509727478027344,
317
+ "logps/chosen": -1.033827543258667,
318
+ "logps/rejected": -1.0074162483215332,
319
+ "loss": 0.6856,
320
+ "rewards/accuracies": 0.6499999761581421,
321
+ "rewards/chosen": 0.018476296216249466,
322
+ "rewards/margins": 0.019362105056643486,
323
+ "rewards/rejected": -0.0008858110522851348,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.7,
328
+ "grad_norm": 86.58629109761597,
329
+ "learning_rate": 2.4663623718355446e-09,
330
+ "logits/chosen": -1.842024803161621,
331
+ "logits/rejected": -1.8078495264053345,
332
+ "logps/chosen": -1.0763428211212158,
333
+ "logps/rejected": -1.0434763431549072,
334
+ "loss": 0.6853,
335
+ "rewards/accuracies": 0.6625000238418579,
336
+ "rewards/chosen": 0.018699195235967636,
337
+ "rewards/margins": 0.017539886757731438,
338
+ "rewards/rejected": 0.0011593066155910492,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.74,
343
+ "grad_norm": 94.3943636617196,
344
+ "learning_rate": 1.9813473874379397e-09,
345
+ "logits/chosen": -1.732317328453064,
346
+ "logits/rejected": -1.7312743663787842,
347
+ "logps/chosen": -1.073425054550171,
348
+ "logps/rejected": -1.0629937648773193,
349
+ "loss": 0.6847,
350
+ "rewards/accuracies": 0.6312500238418579,
351
+ "rewards/chosen": 0.013539738953113556,
352
+ "rewards/margins": 0.013838117942214012,
353
+ "rewards/rejected": -0.00029837898910045624,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.77,
358
+ "grad_norm": 92.64370783949347,
359
+ "learning_rate": 1.5374582296511053e-09,
360
+ "logits/chosen": -1.7242523431777954,
361
+ "logits/rejected": -1.6965806484222412,
362
+ "logps/chosen": -1.0366003513336182,
363
+ "logps/rejected": -0.9941840171813965,
364
+ "loss": 0.6857,
365
+ "rewards/accuracies": 0.625,
366
+ "rewards/chosen": 0.021335098892450333,
367
+ "rewards/margins": 0.015094568021595478,
368
+ "rewards/rejected": 0.006240529473870993,
369
  "step": 230
370
  },
371
  {
372
+ "epoch": 0.8,
373
+ "grad_norm": 91.14853265672438,
374
+ "learning_rate": 1.1407424007485927e-09,
375
+ "logits/chosen": -1.881166696548462,
376
+ "logits/rejected": -1.8595256805419922,
377
+ "logps/chosen": -1.0768239498138428,
378
+ "logps/rejected": -1.0254974365234375,
379
+ "loss": 0.6848,
380
+ "rewards/accuracies": 0.7250000238418579,
381
+ "rewards/chosen": 0.021426241844892502,
382
+ "rewards/margins": 0.021464312449097633,
383
+ "rewards/rejected": -3.8067344576120377e-05,
384
  "step": 240
385
  },
386
  {
387
+ "epoch": 0.84,
388
+ "grad_norm": 88.07457325362287,
389
+ "learning_rate": 7.966047182060226e-10,
390
+ "logits/chosen": -1.8788058757781982,
391
+ "logits/rejected": -1.881800889968872,
392
+ "logps/chosen": -1.0638504028320312,
393
+ "logps/rejected": -1.028044581413269,
394
+ "loss": 0.6828,
395
+ "rewards/accuracies": 0.637499988079071,
396
+ "rewards/chosen": 0.018116505816578865,
397
+ "rewards/margins": 0.015344863757491112,
398
+ "rewards/rejected": 0.0027716427575796843,
399
  "step": 250
400
  },
401
  {
402
+ "epoch": 0.87,
403
+ "grad_norm": 99.87255814500503,
404
+ "learning_rate": 5.097336799988067e-10,
405
+ "logits/chosen": -1.8842456340789795,
406
+ "logits/rejected": -1.8642467260360718,
407
+ "logps/chosen": -1.0772615671157837,
408
+ "logps/rejected": -1.0665724277496338,
409
+ "loss": 0.6848,
410
+ "rewards/accuracies": 0.675000011920929,
411
+ "rewards/chosen": 0.023015262559056282,
412
+ "rewards/margins": 0.02001366578042507,
413
+ "rewards/rejected": 0.0030015967786312103,
414
  "step": 260
415
  },
416
  {
417
+ "epoch": 0.9,
418
+ "grad_norm": 107.74820920122671,
419
+ "learning_rate": 2.840375889663871e-10,
420
+ "logits/chosen": -1.8729417324066162,
421
+ "logits/rejected": -1.8372119665145874,
422
+ "logps/chosen": -0.9936866760253906,
423
+ "logps/rejected": -1.0166294574737549,
424
+ "loss": 0.6847,
425
+ "rewards/accuracies": 0.7250000238418579,
426
+ "rewards/chosen": 0.01751965843141079,
427
+ "rewards/margins": 0.02036522701382637,
428
+ "rewards/rejected": -0.0028455646242946386,
429
  "step": 270
430
  },
431
  {
432
+ "epoch": 0.94,
433
+ "grad_norm": 87.56807066875254,
434
+ "learning_rate": 1.2259130647833627e-10,
435
+ "logits/chosen": -1.7625566720962524,
436
+ "logits/rejected": -1.7224514484405518,
437
+ "logps/chosen": -1.093827724456787,
438
+ "logps/rejected": -1.0462143421173096,
439
+ "loss": 0.6834,
440
+ "rewards/accuracies": 0.6187499761581421,
441
+ "rewards/chosen": 0.01200016774237156,
442
+ "rewards/margins": 0.019106844440102577,
443
+ "rewards/rejected": -0.0071066757664084435,
444
  "step": 280
445
  },
446
  {
447
+ "epoch": 0.97,
448
+ "grad_norm": 81.16688874976322,
449
+ "learning_rate": 2.7594360825166643e-11,
450
+ "logits/chosen": -1.780118703842163,
451
+ "logits/rejected": -1.7672898769378662,
452
+ "logps/chosen": -1.0616685152053833,
453
+ "logps/rejected": -1.0320245027542114,
454
+ "loss": 0.6845,
455
+ "rewards/accuracies": 0.6499999761581421,
456
+ "rewards/chosen": 0.028133947402238846,
457
+ "rewards/margins": 0.020768892019987106,
458
+ "rewards/rejected": 0.007365054450929165,
459
  "step": 290
460
  },
461
  {
462
  "epoch": 1.0,
463
+ "step": 299,
464
  "total_flos": 0.0,
465
+ "train_loss": 0.6880149315033469,
466
+ "train_runtime": 4603.368,
467
+ "train_samples_per_second": 8.317,
468
+ "train_steps_per_second": 0.065
469
  }
470
  ],
471
  "logging_steps": 10,
472
+ "max_steps": 299,
473
  "num_input_tokens_seen": 0,
474
  "num_train_epochs": 1,
475
  "save_steps": 100,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26744db2cfaad1ba924f198a0f7777d66f0f42a1cfd7425001f46a0c8be3dcff
- size 6520
+ oid sha256:5fc17d04ff8999b784673094bfc94f340bf1b82c104ffe64c18c7d2f2ea8b9f4
+ size 6328
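Finally, `training_args.bin` is the pickled arguments object for the run (hence the size change). It can be unpickled to confirm the new learning rate, with the usual caveats: pickle payloads should only be loaded from trusted repos, and the class that was pickled (a TRL config or plain `TrainingArguments`) must be importable locally. A sketch:

```python
# Inspect the serialized training arguments. training_args.bin is a pickle
# payload, so only load it from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)   # e.g. TrainingArguments (or a TRL config subclass)
print(args.learning_rate)    # expected: 1e-08 after this commit
print(args.per_device_train_batch_size, args.seed)
```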