RikkiXu committed
Commit 47cc9fb
1 Parent(s): 2717d40

Model save
README.md CHANGED
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-full
 
- This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on the None dataset.
+ This model is a fine-tuned version of [princeton-nlp/Mistral-7B-Base-SFT-DPO](https://huggingface.co/princeton-nlp/Mistral-7B-Base-SFT-DPO) on an unknown dataset.
 
 ## Model description
 
@@ -55,5 +55,5 @@ The following hyperparameters were used during training:
 
 - Transformers 4.39.3
 - Pytorch 2.1.2+cu118
- - Datasets 2.16.1
+ - Datasets 2.19.1
 - Tokenizers 0.15.2
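For reference, the model card above names the base model and the library versions used for training. A minimal sketch of loading the resulting checkpoint with `transformers` is shown below; the repo id `RikkiXu/zephyr-7b-dpo-full` is an assumption inferred from the committer name and model name (it is not stated in this diff), so substitute the actual Hub id.

```python
# Minimal loading sketch. The repo id below is an assumption; replace it with
# the real Hugging Face Hub id of this checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "RikkiXu/zephyr-7b-dpo-full"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "Explain direct preference optimization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```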
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.6121151776493344,
-    "train_runtime": 6995.9878,
-    "train_samples": 61134,
-    "train_samples_per_second": 8.738,
+    "train_loss": 0.2186826115846634,
+    "train_runtime": 11940.2176,
+    "train_samples": 102360,
+    "train_samples_per_second": 8.573,
     "train_steps_per_second": 0.034
 }
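The updated throughput figures are internally consistent with the step count reported in trainer_state.json further down (400 optimizer steps); a quick sanity check:

```python
# Sanity check of the reported throughput (new run). Values copied from
# all_results.json; the 400-step count comes from trainer_state.json below.
train_runtime = 11940.2176   # seconds
train_samples = 102360
global_step = 400

print(round(train_samples / train_runtime, 3))  # 8.573 -> "train_samples_per_second"
print(round(global_step / train_runtime, 3))    # 0.034 -> "train_steps_per_second"
```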
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:f4cfba4b2bccd53078b6689bd3a353c0c6b8f8f63f7dd05990329ce2a4cd3dc2
+ oid sha256:1a118ac948197a3bbfd249f1956387edf3828fdc2a6cc414bf8ce3c3e7a6e2e3
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:6caee221211932c0d895c01ccf79dca18c9e4d8f596c8c6e5c470beb9f1daa58
+ oid sha256:08a70e605bdd6f68144e056258758c5577009cb50b28c0583f8d451060cb6684
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:1d173bc5999339b83eaf9e08e87fad34084acdec72038e115d114762c5c0c29b
+ oid sha256:7fc90fce317ef012990a74b3c58b0b4875f221d5b4f150cec69f3ed0b9db0134
 size 4540516344
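The three shard entries above are Git LFS pointer files rather than the weights themselves: each records the spec version, a `sha256` object id, and the byte size of the real file. A minimal, illustrative parser for that three-line format (in practice `git lfs` resolves these pointers for you):

```python
# Minimal sketch: parse a Git LFS pointer file of the three-line form shown
# above ("version ...", "oid sha256:<hex>", "size <bytes>"). Illustrative only.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1a118ac948197a3bbfd249f1956387edf3828fdc2a6cc414bf8ce3c3e7a6e2e3
size 4943162336"""
print(parse_lfs_pointer(pointer))  # sha256 id and ~4.9 GB size for shard 1 of 3
```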
runs/Jun21_01-05-25_n136-112-146/events.out.tfevents.1718904669.n136-112-146.2741958.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:8ed007010837d015a2a56714b1fa3e8b2d231c6bed51b5b6465f4805ec74b665
- size 32982
+ oid sha256:f043138c85c0913a83007d89e74a3e8aa588674d22499745c700ebc6897955f7
+ size 33336
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.6121151776493344,
-    "train_runtime": 6995.9878,
-    "train_samples": 61134,
-    "train_samples_per_second": 8.738,
+    "train_loss": 0.2186826115846634,
+    "train_runtime": 11940.2176,
+    "train_samples": 102360,
+    "train_samples_per_second": 8.573,
     "train_steps_per_second": 0.034
 }
trainer_state.json CHANGED
@@ -3,19 +3,19 @@
3
  "best_model_checkpoint": null,
4
  "epoch": 1.0,
5
  "eval_steps": 500,
6
- "global_step": 239,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
- "grad_norm": 1228.6970114702256,
14
- "learning_rate": 2.083333333333333e-08,
15
- "logits/chosen": -0.5983926653862,
16
- "logits/rejected": -0.3142164349555969,
17
- "logps/chosen": -366.9183349609375,
18
- "logps/rejected": -437.1544189453125,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -24,362 +24,617 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.04,
28
- "grad_norm": 550.4800163195696,
29
- "learning_rate": 2.0833333333333333e-07,
30
- "logits/chosen": -0.3194640576839447,
31
- "logits/rejected": 0.544389009475708,
32
- "logps/chosen": -343.9505920410156,
33
- "logps/rejected": -398.5333557128906,
34
- "loss": 0.6237,
35
- "rewards/accuracies": 0.5520833134651184,
36
- "rewards/chosen": -0.3209317922592163,
37
- "rewards/margins": 0.3546760380268097,
38
- "rewards/rejected": -0.6756078600883484,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.08,
43
- "grad_norm": 703.9089332225451,
44
- "learning_rate": 4.1666666666666667e-07,
45
- "logits/chosen": -0.16047191619873047,
46
- "logits/rejected": 0.8960522413253784,
47
- "logps/chosen": -363.82220458984375,
48
- "logps/rejected": -391.59588623046875,
49
- "loss": 0.7373,
50
- "rewards/accuracies": 0.784375011920929,
51
- "rewards/chosen": -4.046866416931152,
52
- "rewards/margins": 3.9459426403045654,
53
- "rewards/rejected": -7.992809295654297,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.13,
58
- "grad_norm": 552.799882218425,
59
- "learning_rate": 4.990398100856366e-07,
60
- "logits/chosen": -0.1726907640695572,
61
- "logits/rejected": 0.5421887636184692,
62
- "logps/chosen": -354.1240234375,
63
- "logps/rejected": -437.666259765625,
64
- "loss": 0.7316,
65
- "rewards/accuracies": 0.8062499761581421,
66
- "rewards/chosen": -2.8292102813720703,
67
- "rewards/margins": 4.830966949462891,
68
- "rewards/rejected": -7.660176753997803,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.17,
73
- "grad_norm": 533.313986686025,
74
- "learning_rate": 4.931986719649298e-07,
75
- "logits/chosen": -0.4836675524711609,
76
- "logits/rejected": 0.30907225608825684,
77
- "logps/chosen": -369.8108215332031,
78
- "logps/rejected": -389.89508056640625,
79
- "loss": 0.6541,
80
- "rewards/accuracies": 0.7749999761581421,
81
- "rewards/chosen": -1.1337063312530518,
82
- "rewards/margins": 3.0231781005859375,
83
- "rewards/rejected": -4.15688419342041,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.21,
88
- "grad_norm": 556.3373380132485,
89
- "learning_rate": 4.821741763807186e-07,
90
- "logits/chosen": -0.36255133152008057,
91
- "logits/rejected": 0.5106185674667358,
92
- "logps/chosen": -335.77191162109375,
93
- "logps/rejected": -392.73406982421875,
94
- "loss": 0.593,
95
- "rewards/accuracies": 0.8218749761581421,
96
- "rewards/chosen": -0.6605129837989807,
97
- "rewards/margins": 3.997671604156494,
98
- "rewards/rejected": -4.658184051513672,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.25,
103
- "grad_norm": 587.3266847652458,
104
- "learning_rate": 4.662012913161997e-07,
105
- "logits/chosen": -0.48477110266685486,
106
- "logits/rejected": 0.4101050794124603,
107
- "logps/chosen": -372.4800109863281,
108
- "logps/rejected": -392.51702880859375,
109
- "loss": 0.665,
110
- "rewards/accuracies": 0.7593749761581421,
111
- "rewards/chosen": -1.5650008916854858,
112
- "rewards/margins": 3.357562303543091,
113
- "rewards/rejected": -4.922562599182129,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.29,
118
- "grad_norm": 466.5579056975509,
119
- "learning_rate": 4.456204510851956e-07,
120
- "logits/chosen": -0.5015081167221069,
121
- "logits/rejected": 0.16009679436683655,
122
- "logps/chosen": -373.043212890625,
123
- "logps/rejected": -423.36041259765625,
124
- "loss": 0.5988,
125
- "rewards/accuracies": 0.8125,
126
- "rewards/chosen": -2.6556434631347656,
127
- "rewards/margins": 3.99314546585083,
128
- "rewards/rejected": -6.648789405822754,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.33,
133
- "grad_norm": 569.6628049875762,
134
- "learning_rate": 4.2087030056579986e-07,
135
- "logits/chosen": -0.6794461607933044,
136
- "logits/rejected": 0.288917601108551,
137
- "logps/chosen": -352.2886657714844,
138
- "logps/rejected": -395.7070617675781,
139
- "loss": 0.6693,
140
- "rewards/accuracies": 0.7437499761581421,
141
- "rewards/chosen": -2.3514175415039062,
142
- "rewards/margins": 3.268317699432373,
143
- "rewards/rejected": -5.619735240936279,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.38,
148
- "grad_norm": 514.8591353074631,
149
- "learning_rate": 3.9247834624635404e-07,
150
- "logits/chosen": -0.302684485912323,
151
- "logits/rejected": 0.4978007674217224,
152
- "logps/chosen": -331.9069519042969,
153
- "logps/rejected": -362.5081787109375,
154
- "loss": 0.6574,
155
- "rewards/accuracies": 0.7749999761581421,
156
- "rewards/chosen": -2.7024354934692383,
157
- "rewards/margins": 3.1714892387390137,
158
- "rewards/rejected": -5.873924732208252,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.42,
163
- "grad_norm": 552.3368165213882,
164
- "learning_rate": 3.610497133404795e-07,
165
- "logits/chosen": -0.3550896942615509,
166
- "logits/rejected": 0.3170868754386902,
167
- "logps/chosen": -339.351318359375,
168
- "logps/rejected": -385.2740173339844,
169
- "loss": 0.6296,
170
- "rewards/accuracies": 0.765625,
171
- "rewards/chosen": -1.6958658695220947,
172
- "rewards/margins": 3.125446319580078,
173
- "rewards/rejected": -4.821312427520752,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.46,
178
- "grad_norm": 538.8081508757055,
179
- "learning_rate": 3.272542485937368e-07,
180
- "logits/chosen": -0.3151847720146179,
181
- "logits/rejected": 0.5818986296653748,
182
- "logps/chosen": -359.85504150390625,
183
- "logps/rejected": -380.2372741699219,
184
- "loss": 0.6323,
185
- "rewards/accuracies": 0.800000011920929,
186
- "rewards/chosen": -2.4269652366638184,
187
- "rewards/margins": 3.6542327404022217,
188
- "rewards/rejected": -6.081197738647461,
189
  "step": 110
190
  },
191
  {
192
- "epoch": 0.5,
193
- "grad_norm": 480.1882844894413,
194
- "learning_rate": 2.9181224366319943e-07,
195
- "logits/chosen": -0.3534928262233734,
196
- "logits/rejected": 0.37930893898010254,
197
- "logps/chosen": -339.82684326171875,
198
- "logps/rejected": -385.6717834472656,
199
- "loss": 0.6157,
200
- "rewards/accuracies": 0.793749988079071,
201
- "rewards/chosen": -3.0394375324249268,
202
- "rewards/margins": 3.763584852218628,
203
- "rewards/rejected": -6.803022861480713,
204
  "step": 120
205
  },
206
  {
207
- "epoch": 0.54,
208
- "grad_norm": 440.0988758060072,
209
- "learning_rate": 2.55479083351317e-07,
210
- "logits/chosen": -0.4240172505378723,
211
- "logits/rejected": 0.5124756097793579,
212
- "logps/chosen": -373.0182189941406,
213
- "logps/rejected": -388.32049560546875,
214
- "loss": 0.6406,
215
- "rewards/accuracies": 0.7875000238418579,
216
- "rewards/chosen": -1.8788728713989258,
217
- "rewards/margins": 3.37638521194458,
218
- "rewards/rejected": -5.255258560180664,
219
  "step": 130
220
  },
221
  {
222
- "epoch": 0.59,
223
- "grad_norm": 567.7119079444957,
224
- "learning_rate": 2.19029145890313e-07,
225
- "logits/chosen": -0.40219712257385254,
226
- "logits/rejected": 0.43135371804237366,
227
- "logps/chosen": -348.668701171875,
228
- "logps/rejected": -383.73431396484375,
229
- "loss": 0.6295,
230
- "rewards/accuracies": 0.784375011920929,
231
- "rewards/chosen": -2.540496826171875,
232
- "rewards/margins": 3.5945935249328613,
233
- "rewards/rejected": -6.135090351104736,
234
  "step": 140
235
  },
236
  {
237
- "epoch": 0.63,
238
- "grad_norm": 580.9997311687505,
239
- "learning_rate": 1.8323929841460178e-07,
240
- "logits/chosen": -0.45832833647727966,
241
- "logits/rejected": 0.4165743887424469,
242
- "logps/chosen": -356.80035400390625,
243
- "logps/rejected": -386.0637512207031,
244
- "loss": 0.6079,
245
- "rewards/accuracies": 0.765625,
246
- "rewards/chosen": -2.680931568145752,
247
- "rewards/margins": 3.39042592048645,
248
- "rewards/rejected": -6.071357250213623,
249
  "step": 150
250
  },
251
  {
252
- "epoch": 0.67,
253
- "grad_norm": 571.50548834413,
254
- "learning_rate": 1.488723393865766e-07,
255
- "logits/chosen": -0.2687947750091553,
256
- "logits/rejected": 0.5257094502449036,
257
- "logps/chosen": -372.47467041015625,
258
- "logps/rejected": -392.5708923339844,
259
- "loss": 0.5822,
260
- "rewards/accuracies": 0.78125,
261
- "rewards/chosen": -2.8401567935943604,
262
- "rewards/margins": 3.3291401863098145,
263
- "rewards/rejected": -6.169297218322754,
264
  "step": 160
265
  },
266
  {
267
- "epoch": 0.71,
268
- "grad_norm": 635.1524720165668,
269
- "learning_rate": 1.1666074087171627e-07,
270
- "logits/chosen": -0.4828396439552307,
271
- "logits/rejected": 0.25855886936187744,
272
- "logps/chosen": -358.3913879394531,
273
- "logps/rejected": -405.04833984375,
274
- "loss": 0.5831,
275
- "rewards/accuracies": 0.765625,
276
- "rewards/chosen": -2.5792622566223145,
277
- "rewards/margins": 3.4110665321350098,
278
- "rewards/rejected": -5.990328788757324,
279
  "step": 170
280
  },
281
  {
282
- "epoch": 0.75,
283
- "grad_norm": 475.58804797511704,
284
- "learning_rate": 8.729103716819111e-08,
285
- "logits/chosen": -0.4832540452480316,
286
- "logits/rejected": 0.45813870429992676,
287
- "logps/chosen": -360.1710510253906,
288
- "logps/rejected": -385.29180908203125,
289
- "loss": 0.5327,
290
- "rewards/accuracies": 0.796875,
291
- "rewards/chosen": -3.303698778152466,
292
- "rewards/margins": 3.9217076301574707,
293
- "rewards/rejected": -7.225406646728516,
294
  "step": 180
295
  },
296
  {
297
- "epoch": 0.79,
298
- "grad_norm": 505.1938261302253,
299
- "learning_rate": 6.138919252022435e-08,
300
- "logits/chosen": -0.2969876229763031,
301
- "logits/rejected": 0.3868524134159088,
302
- "logps/chosen": -341.55084228515625,
303
- "logps/rejected": -388.48858642578125,
304
- "loss": 0.6088,
305
- "rewards/accuracies": 0.815625011920929,
306
- "rewards/chosen": -3.5507800579071045,
307
- "rewards/margins": 3.956249713897705,
308
- "rewards/rejected": -7.507031440734863,
309
  "step": 190
310
  },
311
  {
312
- "epoch": 0.84,
313
- "grad_norm": 538.707594486278,
314
- "learning_rate": 3.9507259776993954e-08,
315
- "logits/chosen": -0.4874555170536041,
316
- "logits/rejected": 0.24319347739219666,
317
- "logps/chosen": -368.9916687011719,
318
- "logps/rejected": -413.5546875,
319
- "loss": 0.5485,
320
- "rewards/accuracies": 0.78125,
321
- "rewards/chosen": -2.9248712062835693,
322
- "rewards/margins": 3.447073459625244,
323
- "rewards/rejected": -6.371943950653076,
324
  "step": 200
325
  },
326
  {
327
- "epoch": 0.88,
328
- "grad_norm": 653.4659715305337,
329
- "learning_rate": 2.2111614344599684e-08,
330
- "logits/chosen": -0.5115900635719299,
331
- "logits/rejected": 0.3286168575286865,
332
- "logps/chosen": -377.30523681640625,
333
- "logps/rejected": -402.25811767578125,
334
- "loss": 0.5517,
335
- "rewards/accuracies": 0.793749988079071,
336
- "rewards/chosen": -2.5977110862731934,
337
- "rewards/margins": 3.4149317741394043,
338
- "rewards/rejected": -6.012642860412598,
339
  "step": 210
340
  },
341
  {
342
- "epoch": 0.92,
343
- "grad_norm": 509.6499303160426,
344
- "learning_rate": 9.57301420397924e-09,
345
- "logits/chosen": -0.5053508877754211,
346
- "logits/rejected": 0.2792305648326874,
347
- "logps/chosen": -361.4806213378906,
348
- "logps/rejected": -397.5937805175781,
349
- "loss": 0.5519,
350
- "rewards/accuracies": 0.8031250238418579,
351
- "rewards/chosen": -2.464998960494995,
352
- "rewards/margins": 3.379429578781128,
353
- "rewards/rejected": -5.844428062438965,
354
  "step": 220
355
  },
356
  {
357
- "epoch": 0.96,
358
- "grad_norm": 515.045513417983,
359
- "learning_rate": 2.158697848236607e-09,
360
- "logits/chosen": -0.4747482240200043,
361
- "logits/rejected": 0.19420669972896576,
362
- "logps/chosen": -361.44647216796875,
363
- "logps/rejected": -409.26702880859375,
364
- "loss": 0.5529,
365
- "rewards/accuracies": 0.746874988079071,
366
- "rewards/chosen": -2.540475368499756,
367
- "rewards/margins": 3.0807271003723145,
368
- "rewards/rejected": -5.62120246887207,
369
  "step": 230
370
  },
371
  {
372
  "epoch": 1.0,
373
- "step": 239,
374
  "total_flos": 0.0,
375
- "train_loss": 0.6121151776493344,
376
- "train_runtime": 6995.9878,
377
- "train_samples_per_second": 8.738,
378
  "train_steps_per_second": 0.034
379
  }
380
  ],
381
  "logging_steps": 10,
382
- "max_steps": 239,
383
  "num_input_tokens_seen": 0,
384
  "num_train_epochs": 1,
385
  "save_steps": 100,
 
3
  "best_model_checkpoint": null,
4
  "epoch": 1.0,
5
  "eval_steps": 500,
6
+ "global_step": 400,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.0,
13
+ "grad_norm": 248.672711818404,
14
+ "learning_rate": 1.25e-08,
15
+ "logits/chosen": -0.5811702013015747,
16
+ "logits/rejected": -0.11655431985855103,
17
+ "logps/chosen": -351.5902099609375,
18
+ "logps/rejected": -240.969970703125,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.03,
28
+ "grad_norm": 222.11982816621227,
29
+ "learning_rate": 1.25e-07,
30
+ "logits/chosen": 0.2612163722515106,
31
+ "logits/rejected": 0.2365657538175583,
32
+ "logps/chosen": -333.2138366699219,
33
+ "logps/rejected": -244.68914794921875,
34
+ "loss": 0.6848,
35
+ "rewards/accuracies": 0.4965277910232544,
36
+ "rewards/chosen": 0.018286287784576416,
37
+ "rewards/margins": 0.01924367994070053,
38
+ "rewards/rejected": -0.0009573940187692642,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.05,
43
+ "grad_norm": 131.0722901505231,
44
+ "learning_rate": 2.5e-07,
45
+ "logits/chosen": 0.00122042594011873,
46
+ "logits/rejected": 0.21793019771575928,
47
+ "logps/chosen": -320.78399658203125,
48
+ "logps/rejected": -234.4599609375,
49
+ "loss": 0.5397,
50
+ "rewards/accuracies": 0.778124988079071,
51
+ "rewards/chosen": 0.3007456660270691,
52
+ "rewards/margins": 0.44470709562301636,
53
+ "rewards/rejected": -0.14396145939826965,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.07,
58
+ "grad_norm": 80.67754344694362,
59
+ "learning_rate": 3.75e-07,
60
+ "logits/chosen": 0.11814385652542114,
61
+ "logits/rejected": -0.13333633542060852,
62
+ "logps/chosen": -305.92999267578125,
63
+ "logps/rejected": -253.8010711669922,
64
+ "loss": 0.3279,
65
+ "rewards/accuracies": 0.8343750238418579,
66
+ "rewards/chosen": 0.9548959732055664,
67
+ "rewards/margins": 2.203428268432617,
68
+ "rewards/rejected": -1.2485322952270508,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.1,
73
+ "grad_norm": 66.3293163499791,
74
+ "learning_rate": 5e-07,
75
+ "logits/chosen": -0.011821460910141468,
76
+ "logits/rejected": -0.11223969608545303,
77
+ "logps/chosen": -307.73626708984375,
78
+ "logps/rejected": -266.0770263671875,
79
+ "loss": 0.2627,
80
+ "rewards/accuracies": 0.8812500238418579,
81
+ "rewards/chosen": 1.6875461339950562,
82
+ "rewards/margins": 3.763516664505005,
83
+ "rewards/rejected": -2.075970411300659,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.12,
88
+ "grad_norm": 72.19599673796307,
89
+ "learning_rate": 4.990486745229364e-07,
90
+ "logits/chosen": 0.3851412832736969,
91
+ "logits/rejected": 0.5114809274673462,
92
+ "logps/chosen": -316.8995056152344,
93
+ "logps/rejected": -269.6571350097656,
94
+ "loss": 0.229,
95
+ "rewards/accuracies": 0.8843749761581421,
96
+ "rewards/chosen": 1.225061058998108,
97
+ "rewards/margins": 4.17338228225708,
98
+ "rewards/rejected": -2.9483208656311035,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.15,
103
+ "grad_norm": 53.904395061776626,
104
+ "learning_rate": 4.96201938253052e-07,
105
+ "logits/chosen": 0.19928967952728271,
106
+ "logits/rejected": 0.3621904253959656,
107
+ "logps/chosen": -323.8656921386719,
108
+ "logps/rejected": -287.19903564453125,
109
+ "loss": 0.2394,
110
+ "rewards/accuracies": 0.8812500238418579,
111
+ "rewards/chosen": 0.07580803334712982,
112
+ "rewards/margins": 4.283209800720215,
113
+ "rewards/rejected": -4.207401752471924,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.17,
118
+ "grad_norm": 70.90163796602725,
119
+ "learning_rate": 4.91481456572267e-07,
120
+ "logits/chosen": 0.33822208642959595,
121
+ "logits/rejected": 0.5376901626586914,
122
+ "logps/chosen": -337.15509033203125,
123
+ "logps/rejected": -272.99346923828125,
124
+ "loss": 0.2201,
125
+ "rewards/accuracies": 0.9156249761581421,
126
+ "rewards/chosen": 0.19220082461833954,
127
+ "rewards/margins": 4.760615348815918,
128
+ "rewards/rejected": -4.56841516494751,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.2,
133
+ "grad_norm": 52.67912669957963,
134
+ "learning_rate": 4.849231551964771e-07,
135
+ "logits/chosen": 0.9565310478210449,
136
+ "logits/rejected": 0.9506447911262512,
137
+ "logps/chosen": -327.2256774902344,
138
+ "logps/rejected": -293.74700927734375,
139
+ "loss": 0.2138,
140
+ "rewards/accuracies": 0.9375,
141
+ "rewards/chosen": -1.5700352191925049,
142
+ "rewards/margins": 5.063401222229004,
143
+ "rewards/rejected": -6.633436679840088,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.23,
148
+ "grad_norm": 65.22420033118601,
149
+ "learning_rate": 4.7657694675916247e-07,
150
+ "logits/chosen": 0.3500242233276367,
151
+ "logits/rejected": 0.21482405066490173,
152
+ "logps/chosen": -315.8086242675781,
153
+ "logps/rejected": -293.5943908691406,
154
+ "loss": 0.2317,
155
+ "rewards/accuracies": 0.90625,
156
+ "rewards/chosen": 0.491361141204834,
157
+ "rewards/margins": 5.032988548278809,
158
+ "rewards/rejected": -4.541626930236816,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.25,
163
+ "grad_norm": 54.96225429454786,
164
+ "learning_rate": 4.6650635094610966e-07,
165
+ "logits/chosen": 0.10862906277179718,
166
+ "logits/rejected": 0.24883398413658142,
167
+ "logps/chosen": -349.65911865234375,
168
+ "logps/rejected": -326.064697265625,
169
+ "loss": 0.1952,
170
+ "rewards/accuracies": 0.893750011920929,
171
+ "rewards/chosen": -1.6933103799819946,
172
+ "rewards/margins": 4.797018051147461,
173
+ "rewards/rejected": -6.490328788757324,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.28,
178
+ "grad_norm": 52.005066827882644,
179
+ "learning_rate": 4.5478801107224794e-07,
180
+ "logits/chosen": 0.11174388229846954,
181
+ "logits/rejected": 0.27005332708358765,
182
+ "logps/chosen": -326.76959228515625,
183
+ "logps/rejected": -285.99151611328125,
184
+ "loss": 0.1986,
185
+ "rewards/accuracies": 0.934374988079071,
186
+ "rewards/chosen": -0.6088568568229675,
187
+ "rewards/margins": 5.2384033203125,
188
+ "rewards/rejected": -5.847259521484375,
189
  "step": 110
190
  },
191
  {
192
+ "epoch": 0.3,
193
+ "grad_norm": 80.2735393844808,
194
+ "learning_rate": 4.415111107797445e-07,
195
+ "logits/chosen": 0.12390895932912827,
196
+ "logits/rejected": 0.2671768069267273,
197
+ "logps/chosen": -314.864013671875,
198
+ "logps/rejected": -296.4173278808594,
199
+ "loss": 0.2022,
200
+ "rewards/accuracies": 0.890625,
201
+ "rewards/chosen": -0.602489709854126,
202
+ "rewards/margins": 5.041877746582031,
203
+ "rewards/rejected": -5.644367694854736,
204
  "step": 120
205
  },
206
  {
207
+ "epoch": 0.33,
208
+ "grad_norm": 53.04554797674892,
209
+ "learning_rate": 4.2677669529663686e-07,
210
+ "logits/chosen": 0.2591269612312317,
211
+ "logits/rejected": 0.31469181180000305,
212
+ "logps/chosen": -337.55279541015625,
213
+ "logps/rejected": -303.430908203125,
214
+ "loss": 0.1929,
215
+ "rewards/accuracies": 0.8999999761581421,
216
+ "rewards/chosen": -0.5142609477043152,
217
+ "rewards/margins": 5.346030235290527,
218
+ "rewards/rejected": -5.86029052734375,
219
  "step": 130
220
  },
221
  {
222
+ "epoch": 0.35,
223
+ "grad_norm": 46.55373068779065,
224
+ "learning_rate": 4.106969024216348e-07,
225
+ "logits/chosen": 0.5539799928665161,
226
+ "logits/rejected": 0.3750854432582855,
227
+ "logps/chosen": -323.2265625,
228
+ "logps/rejected": -283.9201354980469,
229
+ "loss": 0.1948,
230
+ "rewards/accuracies": 0.934374988079071,
231
+ "rewards/chosen": -0.46524372696876526,
232
+ "rewards/margins": 5.173353672027588,
233
+ "rewards/rejected": -5.63859748840332,
234
  "step": 140
235
  },
236
  {
237
+ "epoch": 0.38,
238
+ "grad_norm": 57.84664559821586,
239
+ "learning_rate": 3.933941090877615e-07,
240
+ "logits/chosen": -0.04622136428952217,
241
+ "logits/rejected": 0.10014678537845612,
242
+ "logps/chosen": -330.5162353515625,
243
+ "logps/rejected": -301.1103515625,
244
+ "loss": 0.2027,
245
+ "rewards/accuracies": 0.8999999761581421,
246
+ "rewards/chosen": -0.5420152544975281,
247
+ "rewards/margins": 4.931308746337891,
248
+ "rewards/rejected": -5.473323822021484,
249
  "step": 150
250
  },
251
  {
252
+ "epoch": 0.4,
253
+ "grad_norm": 90.75814646308999,
254
+ "learning_rate": 3.75e-07,
255
+ "logits/chosen": 0.11180607229471207,
256
+ "logits/rejected": -0.048911936581134796,
257
+ "logps/chosen": -295.4419860839844,
258
+ "logps/rejected": -279.9349670410156,
259
+ "loss": 0.1841,
260
+ "rewards/accuracies": 0.921875,
261
+ "rewards/chosen": -0.33810311555862427,
262
+ "rewards/margins": 4.924574375152588,
263
+ "rewards/rejected": -5.262677192687988,
264
  "step": 160
265
  },
266
  {
267
+ "epoch": 0.42,
268
+ "grad_norm": 48.25868450687608,
269
+ "learning_rate": 3.5565456543517485e-07,
270
+ "logits/chosen": -0.2750697731971741,
271
+ "logits/rejected": 0.026401836425065994,
272
+ "logps/chosen": -324.1236572265625,
273
+ "logps/rejected": -290.73785400390625,
274
+ "loss": 0.1938,
275
+ "rewards/accuracies": 0.887499988079071,
276
+ "rewards/chosen": -0.8598020672798157,
277
+ "rewards/margins": 5.191195487976074,
278
+ "rewards/rejected": -6.050997257232666,
279
  "step": 170
280
  },
281
  {
282
+ "epoch": 0.45,
283
+ "grad_norm": 55.57273036697954,
284
+ "learning_rate": 3.355050358314172e-07,
285
+ "logits/chosen": -0.17330403625965118,
286
+ "logits/rejected": -0.37704771757125854,
287
+ "logps/chosen": -335.1918029785156,
288
+ "logps/rejected": -303.3643493652344,
289
+ "loss": 0.1893,
290
+ "rewards/accuracies": 0.940625011920929,
291
+ "rewards/chosen": 0.3236238360404968,
292
+ "rewards/margins": 5.686396598815918,
293
+ "rewards/rejected": -5.3627729415893555,
294
  "step": 180
295
  },
296
  {
297
+ "epoch": 0.47,
298
+ "grad_norm": 34.4619211960604,
299
+ "learning_rate": 3.147047612756302e-07,
300
+ "logits/chosen": 0.051471106708049774,
301
+ "logits/rejected": 0.08085541427135468,
302
+ "logps/chosen": -341.36932373046875,
303
+ "logps/rejected": -306.84906005859375,
304
+ "loss": 0.1714,
305
+ "rewards/accuracies": 0.9312499761581421,
306
+ "rewards/chosen": -0.4273417592048645,
307
+ "rewards/margins": 5.435866355895996,
308
+ "rewards/rejected": -5.863208293914795,
309
  "step": 190
310
  },
311
  {
312
+ "epoch": 0.5,
313
+ "grad_norm": 56.25034316469711,
314
+ "learning_rate": 2.934120444167326e-07,
315
+ "logits/chosen": 0.2834271490573883,
316
+ "logits/rejected": 0.4803285002708435,
317
+ "logps/chosen": -320.8459777832031,
318
+ "logps/rejected": -293.8531799316406,
319
+ "loss": 0.1797,
320
+ "rewards/accuracies": 0.9125000238418579,
321
+ "rewards/chosen": -1.590319275856018,
322
+ "rewards/margins": 5.1105732917785645,
323
+ "rewards/rejected": -6.700892448425293,
324
  "step": 200
325
  },
326
  {
327
+ "epoch": 0.53,
328
+ "grad_norm": 42.616613118118565,
329
+ "learning_rate": 2.717889356869146e-07,
330
+ "logits/chosen": -0.1653163880109787,
331
+ "logits/rejected": -0.19997279345989227,
332
+ "logps/chosen": -297.6966857910156,
333
+ "logps/rejected": -281.0061950683594,
334
+ "loss": 0.1905,
335
+ "rewards/accuracies": 0.921875,
336
+ "rewards/chosen": 0.32219845056533813,
337
+ "rewards/margins": 5.068012237548828,
338
+ "rewards/rejected": -4.745813846588135,
339
  "step": 210
340
  },
341
  {
342
+ "epoch": 0.55,
343
+ "grad_norm": 54.94298294558815,
344
+ "learning_rate": 2.5e-07,
345
+ "logits/chosen": 0.11906982958316803,
346
+ "logits/rejected": 0.16367843747138977,
347
+ "logps/chosen": -322.9801330566406,
348
+ "logps/rejected": -289.7718811035156,
349
+ "loss": 0.2379,
350
+ "rewards/accuracies": 0.9312499761581421,
351
+ "rewards/chosen": -0.8957729339599609,
352
+ "rewards/margins": 4.5600104331970215,
353
+ "rewards/rejected": -5.455783367156982,
354
  "step": 220
355
  },
356
  {
357
+ "epoch": 0.57,
358
+ "grad_norm": 45.798677109719435,
359
+ "learning_rate": 2.2821106431308543e-07,
360
+ "logits/chosen": 0.26459816098213196,
361
+ "logits/rejected": 0.37918907403945923,
362
+ "logps/chosen": -308.4639892578125,
363
+ "logps/rejected": -284.7757873535156,
364
+ "loss": 0.1974,
365
+ "rewards/accuracies": 0.90625,
366
+ "rewards/chosen": -0.7637487649917603,
367
+ "rewards/margins": 5.488121032714844,
368
+ "rewards/rejected": -6.251870155334473,
369
  "step": 230
370
  },
371
+ {
372
+ "epoch": 0.6,
373
+ "grad_norm": 71.171077628863,
374
+ "learning_rate": 2.065879555832674e-07,
375
+ "logits/chosen": 0.11944101750850677,
376
+ "logits/rejected": 0.10174547135829926,
377
+ "logps/chosen": -317.2291564941406,
378
+ "logps/rejected": -301.72357177734375,
379
+ "loss": 0.1761,
380
+ "rewards/accuracies": 0.9281250238418579,
381
+ "rewards/chosen": -0.4725034832954407,
382
+ "rewards/margins": 5.045371055603027,
383
+ "rewards/rejected": -5.517874717712402,
384
+ "step": 240
385
+ },
386
+ {
387
+ "epoch": 0.62,
388
+ "grad_norm": 58.77603788283333,
389
+ "learning_rate": 1.8529523872436977e-07,
390
+ "logits/chosen": -0.0030237496830523014,
391
+ "logits/rejected": 0.24394559860229492,
392
+ "logps/chosen": -337.0401306152344,
393
+ "logps/rejected": -304.41485595703125,
394
+ "loss": 0.2199,
395
+ "rewards/accuracies": 0.90625,
396
+ "rewards/chosen": -1.1832717657089233,
397
+ "rewards/margins": 4.3705525398254395,
398
+ "rewards/rejected": -5.553823947906494,
399
+ "step": 250
400
+ },
401
+ {
402
+ "epoch": 0.65,
403
+ "grad_norm": 58.549851578658085,
404
+ "learning_rate": 1.6449496416858282e-07,
405
+ "logits/chosen": 0.2534635663032532,
406
+ "logits/rejected": 0.33454760909080505,
407
+ "logps/chosen": -322.7724609375,
408
+ "logps/rejected": -291.37420654296875,
409
+ "loss": 0.1828,
410
+ "rewards/accuracies": 0.9437500238418579,
411
+ "rewards/chosen": -1.3282363414764404,
412
+ "rewards/margins": 5.129273414611816,
413
+ "rewards/rejected": -6.457509517669678,
414
+ "step": 260
415
+ },
416
+ {
417
+ "epoch": 0.68,
418
+ "grad_norm": 61.69689329910415,
419
+ "learning_rate": 1.4434543456482518e-07,
420
+ "logits/chosen": 0.16627629101276398,
421
+ "logits/rejected": 0.17112889885902405,
422
+ "logps/chosen": -328.3280944824219,
423
+ "logps/rejected": -302.532958984375,
424
+ "loss": 0.1701,
425
+ "rewards/accuracies": 0.918749988079071,
426
+ "rewards/chosen": -0.8465593457221985,
427
+ "rewards/margins": 5.068365573883057,
428
+ "rewards/rejected": -5.914924621582031,
429
+ "step": 270
430
+ },
431
+ {
432
+ "epoch": 0.7,
433
+ "grad_norm": 82.28572950186891,
434
+ "learning_rate": 1.2500000000000005e-07,
435
+ "logits/chosen": 0.07045526802539825,
436
+ "logits/rejected": 0.055424489080905914,
437
+ "logps/chosen": -314.9088134765625,
438
+ "logps/rejected": -283.484130859375,
439
+ "loss": 0.1988,
440
+ "rewards/accuracies": 0.934374988079071,
441
+ "rewards/chosen": -0.05264568328857422,
442
+ "rewards/margins": 5.250518798828125,
443
+ "rewards/rejected": -5.303164958953857,
444
+ "step": 280
445
+ },
446
+ {
447
+ "epoch": 0.72,
448
+ "grad_norm": 73.90506249227809,
449
+ "learning_rate": 1.0660589091223854e-07,
450
+ "logits/chosen": 0.08644680678844452,
451
+ "logits/rejected": 0.3770992159843445,
452
+ "logps/chosen": -334.16839599609375,
453
+ "logps/rejected": -295.61138916015625,
454
+ "loss": 0.1816,
455
+ "rewards/accuracies": 0.9312499761581421,
456
+ "rewards/chosen": -0.6267386674880981,
457
+ "rewards/margins": 5.2511420249938965,
458
+ "rewards/rejected": -5.877881050109863,
459
+ "step": 290
460
+ },
461
+ {
462
+ "epoch": 0.75,
463
+ "grad_norm": 55.979754279546064,
464
+ "learning_rate": 8.930309757836516e-08,
465
+ "logits/chosen": 0.0685787945985794,
466
+ "logits/rejected": 0.4188007414340973,
467
+ "logps/chosen": -330.8103332519531,
468
+ "logps/rejected": -297.2096252441406,
469
+ "loss": 0.1831,
470
+ "rewards/accuracies": 0.8999999761581421,
471
+ "rewards/chosen": -0.8838691711425781,
472
+ "rewards/margins": 4.987154960632324,
473
+ "rewards/rejected": -5.871024131774902,
474
+ "step": 300
475
+ },
476
+ {
477
+ "epoch": 0.78,
478
+ "grad_norm": 50.80261213037778,
479
+ "learning_rate": 7.322330470336313e-08,
480
+ "logits/chosen": 0.22002212703227997,
481
+ "logits/rejected": 0.2094310224056244,
482
+ "logps/chosen": -321.89739990234375,
483
+ "logps/rejected": -294.4070739746094,
484
+ "loss": 0.1741,
485
+ "rewards/accuracies": 0.918749988079071,
486
+ "rewards/chosen": -0.5131534337997437,
487
+ "rewards/margins": 5.067374229431152,
488
+ "rewards/rejected": -5.580528259277344,
489
+ "step": 310
490
+ },
491
+ {
492
+ "epoch": 0.8,
493
+ "grad_norm": 62.71796095386463,
494
+ "learning_rate": 5.848888922025552e-08,
495
+ "logits/chosen": 0.0758807510137558,
496
+ "logits/rejected": 0.19501206278800964,
497
+ "logps/chosen": -337.13995361328125,
498
+ "logps/rejected": -296.71954345703125,
499
+ "loss": 0.1841,
500
+ "rewards/accuracies": 0.953125,
501
+ "rewards/chosen": -0.4793934226036072,
502
+ "rewards/margins": 5.182127952575684,
503
+ "rewards/rejected": -5.661521911621094,
504
+ "step": 320
505
+ },
506
+ {
507
+ "epoch": 0.82,
508
+ "grad_norm": 55.862146356475115,
509
+ "learning_rate": 4.521198892775202e-08,
510
+ "logits/chosen": 0.08274734020233154,
511
+ "logits/rejected": -0.0012732266914099455,
512
+ "logps/chosen": -333.7369079589844,
513
+ "logps/rejected": -296.9526062011719,
514
+ "loss": 0.1768,
515
+ "rewards/accuracies": 0.925000011920929,
516
+ "rewards/chosen": -0.4573189616203308,
517
+ "rewards/margins": 5.402385711669922,
518
+ "rewards/rejected": -5.859705448150635,
519
+ "step": 330
520
+ },
521
+ {
522
+ "epoch": 0.85,
523
+ "grad_norm": 78.6552872883194,
524
+ "learning_rate": 3.349364905389032e-08,
525
+ "logits/chosen": -0.16344629228115082,
526
+ "logits/rejected": 0.0033723146189004183,
527
+ "logps/chosen": -322.8833312988281,
528
+ "logps/rejected": -298.59588623046875,
529
+ "loss": 0.1682,
530
+ "rewards/accuracies": 0.8999999761581421,
531
+ "rewards/chosen": -0.6776003837585449,
532
+ "rewards/margins": 5.152725696563721,
533
+ "rewards/rejected": -5.830325126647949,
534
+ "step": 340
535
+ },
536
+ {
537
+ "epoch": 0.88,
538
+ "grad_norm": 57.94825784387087,
539
+ "learning_rate": 2.3423053240837514e-08,
540
+ "logits/chosen": 0.06374481320381165,
541
+ "logits/rejected": 0.13793043792247772,
542
+ "logps/chosen": -319.5912780761719,
543
+ "logps/rejected": -297.6089172363281,
544
+ "loss": 0.1912,
545
+ "rewards/accuracies": 0.903124988079071,
546
+ "rewards/chosen": -0.5590634346008301,
547
+ "rewards/margins": 5.25864315032959,
548
+ "rewards/rejected": -5.81770658493042,
549
+ "step": 350
550
+ },
551
+ {
552
+ "epoch": 0.9,
553
+ "grad_norm": 48.20871737390797,
554
+ "learning_rate": 1.507684480352292e-08,
555
+ "logits/chosen": 0.1408473700284958,
556
+ "logits/rejected": 0.24622826278209686,
557
+ "logps/chosen": -323.8467712402344,
558
+ "logps/rejected": -301.0394592285156,
559
+ "loss": 0.1714,
560
+ "rewards/accuracies": 0.9312499761581421,
561
+ "rewards/chosen": -0.5114877820014954,
562
+ "rewards/margins": 5.479299068450928,
563
+ "rewards/rejected": -5.990786552429199,
564
+ "step": 360
565
+ },
566
+ {
567
+ "epoch": 0.93,
568
+ "grad_norm": 66.71686397462808,
569
+ "learning_rate": 8.518543427732949e-09,
570
+ "logits/chosen": -0.14612798392772675,
571
+ "logits/rejected": -0.002347910311073065,
572
+ "logps/chosen": -305.1243896484375,
573
+ "logps/rejected": -282.12835693359375,
574
+ "loss": 0.1673,
575
+ "rewards/accuracies": 0.921875,
576
+ "rewards/chosen": -0.6524958610534668,
577
+ "rewards/margins": 5.196988105773926,
578
+ "rewards/rejected": -5.849484443664551,
579
+ "step": 370
580
+ },
581
+ {
582
+ "epoch": 0.95,
583
+ "grad_norm": 61.291842343672215,
584
+ "learning_rate": 3.798061746947995e-09,
585
+ "logits/chosen": 0.044129520654678345,
586
+ "logits/rejected": 0.14980553090572357,
587
+ "logps/chosen": -319.0976257324219,
588
+ "logps/rejected": -283.139892578125,
589
+ "loss": 0.1714,
590
+ "rewards/accuracies": 0.940625011920929,
591
+ "rewards/chosen": -0.5317240953445435,
592
+ "rewards/margins": 5.177973747253418,
593
+ "rewards/rejected": -5.70969820022583,
594
+ "step": 380
595
+ },
596
+ {
597
+ "epoch": 0.97,
598
+ "grad_norm": 56.153629064135075,
599
+ "learning_rate": 9.513254770636137e-10,
600
+ "logits/chosen": -0.012590537779033184,
601
+ "logits/rejected": 0.09102629870176315,
602
+ "logps/chosen": -334.525634765625,
603
+ "logps/rejected": -287.55987548828125,
604
+ "loss": 0.1797,
605
+ "rewards/accuracies": 0.925000011920929,
606
+ "rewards/chosen": -0.5071713328361511,
607
+ "rewards/margins": 5.1908040046691895,
608
+ "rewards/rejected": -5.697975158691406,
609
+ "step": 390
610
+ },
611
+ {
612
+ "epoch": 1.0,
613
+ "grad_norm": 40.78979544232789,
614
+ "learning_rate": 0.0,
615
+ "logits/chosen": 0.030362462624907494,
616
+ "logits/rejected": 0.237002894282341,
617
+ "logps/chosen": -320.5105895996094,
618
+ "logps/rejected": -285.7564697265625,
619
+ "loss": 0.1703,
620
+ "rewards/accuracies": 0.9156249761581421,
621
+ "rewards/chosen": -0.8242006301879883,
622
+ "rewards/margins": 5.20082950592041,
623
+ "rewards/rejected": -6.025030136108398,
624
+ "step": 400
625
+ },
626
  {
627
  "epoch": 1.0,
628
+ "step": 400,
629
  "total_flos": 0.0,
630
+ "train_loss": 0.2186826115846634,
631
+ "train_runtime": 11940.2176,
632
+ "train_samples_per_second": 8.573,
633
  "train_steps_per_second": 0.034
634
  }
635
  ],
636
  "logging_steps": 10,
637
+ "max_steps": 400,
638
  "num_input_tokens_seen": 0,
639
  "num_train_epochs": 1,
640
  "save_steps": 100,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4fcf5c85f80c92a9eacc9ea3ac3046fce1e674dd5e2c6a60e50c6fe6b0e4a3a2
+ oid sha256:b6f555e8b191e5dee3bafecffb3a88f44ea1db78545e88e62c9e7b6e0969daad
 size 6328