zfz1 committed on
Commit 3cbea15
1 Parent(s): 759010b

Model save

Files changed (4)
  1. README.md +63 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +600 -0
README.md ADDED
@@ -0,0 +1,63 @@
+ ---
+ base_model: deepseek-ai/deepseek-math-7b-base
+ library_name: peft
+ license: other
+ tags:
+ - trl
+ - orpo
+ - generated_from_trainer
+ model-index:
+ - name: deepseek-8b-orpo-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/thuzfz1/huggingface/runs/zs68qfsi)
+ # deepseek-8b-orpo-lora
+
+ This model is a fine-tuned version of [deepseek-ai/deepseek-math-7b-base](https://huggingface.co/deepseek-ai/deepseek-math-7b-base) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3e-06
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 43
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.42.3
+ - Pytorch 2.1.2
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
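
The card above records the effective hyperparameters but not the training script, which is not part of this commit. Below is a minimal sketch of how an ORPO + LoRA run with these settings might be wired up with TRL and PEFT; the dataset path, LoRA rank/alpha/target modules, and dtype are illustrative assumptions, not values taken from the commit.

```python
# Hypothetical reconstruction; the actual training script is not in this commit.
# Dataset path, LoRA rank/alpha/targets, and dtype are assumptions for illustration.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

base = "deepseek-ai/deepseek-math-7b-base"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.bfloat16)

# LoRA adapter; rank, alpha, and target modules are not recorded in the card.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)

# Values below mirror the "Training hyperparameters" list in the README:
# per-device batch size 16 x 4 GPUs x gradient accumulation 2 = total batch 128.
args = ORPOConfig(
    output_dir="deepseek-8b-orpo-lora",
    learning_rate=3e-6,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,
    num_train_epochs=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=43,
    logging_steps=10,
    bf16=True,
)

# Hypothetical placeholder: a preference dataset with prompt/chosen/rejected
# columns. The dataset actually used for this run is not named in the card.
train_dataset = load_dataset("your-org/your-preference-dataset", split="train")

trainer = ORPOTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```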
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.9936102236421727,
+ "total_flos": 0.0,
+ "train_loss": 1.3676127867820935,
+ "train_runtime": 5524.9246,
+ "train_samples": 20000,
+ "train_samples_per_second": 7.24,
+ "train_steps_per_second": 0.056
+ }
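
The throughput fields are internally consistent: with 20000 training samples over 2 epochs, 7.24 samples/s and 0.056 steps/s follow from the 5524.9 s runtime and the 312 optimizer steps recorded in trainer_state.json below. A quick check, using values copied from the files in this commit:

```python
# Sanity-check the reported throughput (values from all_results.json and
# trainer_state.json in this commit).
train_samples = 20000
num_epochs = 2
train_runtime_s = 5524.9246
optimizer_steps = 312

print(round(train_samples * num_epochs / train_runtime_s, 2))  # 7.24 samples/s
print(round(optimizer_steps / train_runtime_s, 3))             # 0.056 steps/s
```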
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.9936102236421727,
+ "total_flos": 0.0,
+ "train_loss": 1.3676127867820935,
+ "train_runtime": 5524.9246,
+ "train_samples": 20000,
+ "train_samples_per_second": 7.24,
+ "train_steps_per_second": 0.056
+ }
trainer_state.json ADDED
@@ -0,0 +1,600 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9936102236421727,
+ "eval_steps": 10000,
+ "global_step": 312,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.06389776357827476,
+ "grad_norm": 0.341777060295131,
+ "learning_rate": 9.375e-07,
+ "log_odds_chosen": 0.05486620217561722,
+ "log_odds_ratio": -0.6975381374359131,
+ "logits/chosen": 33.646114349365234,
+ "logits/rejected": 33.55038833618164,
+ "logps/chosen": -0.9772504568099976,
+ "logps/rejected": -1.0204073190689087,
+ "loss": 1.6735,
+ "nll_loss": 1.5879063606262207,
+ "rewards/accuracies": 0.543749988079071,
+ "rewards/chosen": -0.09772505611181259,
+ "rewards/margins": 0.004315672442317009,
+ "rewards/rejected": -0.10204073041677475,
+ "step": 10
+ },
+ {
+ "epoch": 0.12779552715654952,
+ "grad_norm": 0.357474094120966,
+ "learning_rate": 1.875e-06,
+ "log_odds_chosen": 0.054913729429244995,
+ "log_odds_ratio": -0.6985839009284973,
+ "logits/chosen": 34.386383056640625,
+ "logits/rejected": 33.98283386230469,
+ "logps/chosen": -0.9762754440307617,
+ "logps/rejected": -1.0154238939285278,
+ "loss": 1.668,
+ "nll_loss": 1.6016145944595337,
+ "rewards/accuracies": 0.515625,
+ "rewards/chosen": -0.09762755781412125,
+ "rewards/margins": 0.003914830274879932,
+ "rewards/rejected": -0.10154237598180771,
+ "step": 20
+ },
+ {
+ "epoch": 0.19169329073482427,
+ "grad_norm": 0.3765211315354108,
+ "learning_rate": 2.8125e-06,
+ "log_odds_chosen": 0.06909728795289993,
+ "log_odds_ratio": -0.6911064386367798,
+ "logits/chosen": 33.32851028442383,
+ "logits/rejected": 32.749427795410156,
+ "logps/chosen": -0.9813788533210754,
+ "logps/rejected": -1.0349732637405396,
+ "loss": 1.6757,
+ "nll_loss": 1.6085166931152344,
+ "rewards/accuracies": 0.5249999761581421,
+ "rewards/chosen": -0.09813789278268814,
+ "rewards/margins": 0.005359448026865721,
+ "rewards/rejected": -0.10349734127521515,
+ "step": 30
+ },
+ {
+ "epoch": 0.25559105431309903,
+ "grad_norm": 0.39833625263223615,
+ "learning_rate": 2.993961440992859e-06,
+ "log_odds_chosen": 0.11841567605733871,
+ "log_odds_ratio": -0.6757725477218628,
+ "logits/chosen": 33.839942932128906,
+ "logits/rejected": 33.162986755371094,
+ "logps/chosen": -0.9702426791191101,
+ "logps/rejected": -1.0581519603729248,
+ "loss": 1.664,
+ "nll_loss": 1.6022984981536865,
+ "rewards/accuracies": 0.6031249761581421,
+ "rewards/chosen": -0.09702426195144653,
+ "rewards/margins": 0.008790932595729828,
+ "rewards/rejected": -0.10581519454717636,
+ "step": 40
+ },
+ {
+ "epoch": 0.3194888178913738,
+ "grad_norm": 0.45952260963075464,
+ "learning_rate": 2.9695130976348534e-06,
+ "log_odds_chosen": 0.09267185628414154,
+ "log_odds_ratio": -0.684535801410675,
+ "logits/chosen": 34.052207946777344,
+ "logits/rejected": 32.802547454833984,
+ "logps/chosen": -0.9801093935966492,
+ "logps/rejected": -1.0446739196777344,
+ "loss": 1.6582,
+ "nll_loss": 1.5952296257019043,
+ "rewards/accuracies": 0.565625011920929,
+ "rewards/chosen": -0.09801094233989716,
+ "rewards/margins": 0.0064564356580376625,
+ "rewards/rejected": -0.10446737706661224,
+ "step": 50
+ },
+ {
+ "epoch": 0.38338658146964855,
+ "grad_norm": 0.49504068932785444,
+ "learning_rate": 2.9265847744427307e-06,
+ "log_odds_chosen": 0.06481704860925674,
+ "log_odds_ratio": -0.698478102684021,
+ "logits/chosen": 32.32544708251953,
+ "logits/rejected": 32.51976776123047,
+ "logps/chosen": -0.9770153164863586,
+ "logps/rejected": -1.0272716283798218,
+ "loss": 1.6387,
+ "nll_loss": 1.572589635848999,
+ "rewards/accuracies": 0.5687500238418579,
+ "rewards/chosen": -0.0977015346288681,
+ "rewards/margins": 0.0050256275571882725,
+ "rewards/rejected": -0.10272715240716934,
+ "step": 60
+ },
+ {
+ "epoch": 0.4472843450479233,
+ "grad_norm": 0.5262433549192981,
+ "learning_rate": 2.865716319988224e-06,
+ "log_odds_chosen": 0.030134931206703186,
+ "log_odds_ratio": -0.7101849317550659,
+ "logits/chosen": 33.02894973754883,
+ "logits/rejected": 33.309715270996094,
+ "logps/chosen": -0.9551456570625305,
+ "logps/rejected": -0.9776851534843445,
+ "loss": 1.6289,
+ "nll_loss": 1.5476250648498535,
+ "rewards/accuracies": 0.5531250238418579,
+ "rewards/chosen": -0.09551456570625305,
+ "rewards/margins": 0.0022539461497217417,
+ "rewards/rejected": -0.09776850789785385,
+ "step": 70
+ },
+ {
+ "epoch": 0.5111821086261981,
+ "grad_norm": 0.5188886669640671,
+ "learning_rate": 2.7876731904027993e-06,
+ "log_odds_chosen": 0.08591620624065399,
+ "log_odds_ratio": -0.6787055730819702,
+ "logits/chosen": 34.61581039428711,
+ "logits/rejected": 34.63213348388672,
+ "logps/chosen": -0.9702480435371399,
+ "logps/rejected": -1.0235538482666016,
+ "loss": 1.5921,
+ "nll_loss": 1.5600519180297852,
+ "rewards/accuracies": 0.5625,
+ "rewards/chosen": -0.0970248132944107,
+ "rewards/margins": 0.005330582614988089,
+ "rewards/rejected": -0.10235539823770523,
+ "step": 80
+ },
+ {
+ "epoch": 0.5750798722044729,
+ "grad_norm": 0.5939864943475555,
+ "learning_rate": 2.6934368233226715e-06,
+ "log_odds_chosen": 0.0763852447271347,
+ "log_odds_ratio": -0.6891772150993347,
+ "logits/chosen": 34.269248962402344,
+ "logits/rejected": 34.00069046020508,
+ "logps/chosen": -0.9529930949211121,
+ "logps/rejected": -1.0018848180770874,
+ "loss": 1.586,
+ "nll_loss": 1.5087509155273438,
+ "rewards/accuracies": 0.5687500238418579,
+ "rewards/chosen": -0.09529931843280792,
+ "rewards/margins": 0.004889167379587889,
+ "rewards/rejected": -0.1001884788274765,
+ "step": 90
+ },
+ {
+ "epoch": 0.6389776357827476,
+ "grad_norm": 0.6337044866684244,
+ "learning_rate": 2.584192295741087e-06,
+ "log_odds_chosen": 0.057517312467098236,
+ "log_odds_ratio": -0.7008931040763855,
+ "logits/chosen": 32.44559860229492,
+ "logits/rejected": 32.7852783203125,
+ "logps/chosen": -0.9386932253837585,
+ "logps/rejected": -0.9813023805618286,
+ "loss": 1.5499,
+ "nll_loss": 1.4884004592895508,
+ "rewards/accuracies": 0.5562499761581421,
+ "rewards/chosen": -0.09386932104825974,
+ "rewards/margins": 0.004260920453816652,
+ "rewards/rejected": -0.0981302410364151,
+ "step": 100
+ },
+ {
+ "epoch": 0.7028753993610224,
+ "grad_norm": 0.658610355052368,
+ "learning_rate": 2.461313420977536e-06,
+ "log_odds_chosen": 0.06078052520751953,
+ "log_odds_ratio": -0.694057822227478,
+ "logits/chosen": 34.87937927246094,
+ "logits/rejected": 34.10810470581055,
+ "logps/chosen": -0.9328826665878296,
+ "logps/rejected": -0.9768407940864563,
+ "loss": 1.5083,
+ "nll_loss": 1.4501432180404663,
+ "rewards/accuracies": 0.543749988079071,
+ "rewards/chosen": -0.09328827261924744,
+ "rewards/margins": 0.004395808558911085,
+ "rewards/rejected": -0.09768407791852951,
+ "step": 110
+ },
+ {
+ "epoch": 0.7667731629392971,
+ "grad_norm": 0.7021451570051785,
+ "learning_rate": 2.3263454721781537e-06,
+ "log_odds_chosen": 0.012948527932167053,
+ "log_odds_ratio": -0.7222181558609009,
+ "logits/chosen": 32.433719635009766,
+ "logits/rejected": 32.53590774536133,
+ "logps/chosen": -0.9092488288879395,
+ "logps/rejected": -0.9225506782531738,
+ "loss": 1.4673,
+ "nll_loss": 1.4067564010620117,
+ "rewards/accuracies": 0.4937500059604645,
+ "rewards/chosen": -0.09092487394809723,
+ "rewards/margins": 0.0013302009319886565,
+ "rewards/rejected": -0.09225507825613022,
+ "step": 120
+ },
+ {
+ "epoch": 0.8306709265175719,
+ "grad_norm": 0.6227085414802311,
+ "learning_rate": 2.18098574960932e-06,
+ "log_odds_chosen": 0.08671535551548004,
+ "log_odds_ratio": -0.6804493069648743,
+ "logits/chosen": 33.88345718383789,
+ "logits/rejected": 33.001914978027344,
+ "logps/chosen": -0.8342811465263367,
+ "logps/rejected": -0.8885458707809448,
+ "loss": 1.3999,
+ "nll_loss": 1.3158425092697144,
+ "rewards/accuracies": 0.581250011920929,
+ "rewards/chosen": -0.08342811465263367,
+ "rewards/margins": 0.005426469258964062,
+ "rewards/rejected": -0.0888545885682106,
+ "step": 130
+ },
+ {
+ "epoch": 0.8945686900958466,
+ "grad_norm": 0.6264864351444569,
+ "learning_rate": 2.027062236122014e-06,
+ "log_odds_chosen": 0.04714164510369301,
+ "log_odds_ratio": -0.6999929547309875,
+ "logits/chosen": 31.832406997680664,
+ "logits/rejected": 31.350921630859375,
+ "logps/chosen": -0.8387459516525269,
+ "logps/rejected": -0.876442551612854,
+ "loss": 1.3491,
+ "nll_loss": 1.2940082550048828,
+ "rewards/accuracies": 0.534375011920929,
+ "rewards/chosen": -0.08387459814548492,
+ "rewards/margins": 0.0037696503568440676,
+ "rewards/rejected": -0.08764425665140152,
+ "step": 140
+ },
+ {
+ "epoch": 0.9584664536741214,
+ "grad_norm": 0.5655314348380543,
+ "learning_rate": 1.866510609206841e-06,
+ "log_odds_chosen": 0.044148243963718414,
+ "log_odds_ratio": -0.7019084692001343,
+ "logits/chosen": 33.6451416015625,
+ "logits/rejected": 33.04177474975586,
+ "logps/chosen": -0.8360700607299805,
+ "logps/rejected": -0.8655373454093933,
+ "loss": 1.3242,
+ "nll_loss": 1.2609388828277588,
+ "rewards/accuracies": 0.53125,
+ "rewards/chosen": -0.08360700309276581,
+ "rewards/margins": 0.0029467367567121983,
+ "rewards/rejected": -0.08655373752117157,
+ "step": 150
+ },
+ {
+ "epoch": 1.0223642172523961,
+ "grad_norm": 0.5571454673635674,
+ "learning_rate": 1.7013498987264833e-06,
+ "log_odds_chosen": 0.06723789870738983,
+ "log_odds_ratio": -0.6888397932052612,
+ "logits/chosen": 33.27342987060547,
+ "logits/rejected": 32.30434036254883,
+ "logps/chosen": -0.7994817495346069,
+ "logps/rejected": -0.8369095921516418,
+ "loss": 1.2899,
+ "nll_loss": 1.211586594581604,
+ "rewards/accuracies": 0.565625011920929,
+ "rewards/chosen": -0.07994817197322845,
+ "rewards/margins": 0.0037427886854857206,
+ "rewards/rejected": -0.08369095623493195,
+ "step": 160
+ },
+ {
+ "epoch": 1.0862619808306708,
+ "grad_norm": 0.5333966997862959,
+ "learning_rate": 1.5336570964437077e-06,
+ "log_odds_chosen": 0.038995109498500824,
+ "log_odds_ratio": -0.6969125866889954,
+ "logits/chosen": 32.00867462158203,
+ "logits/rejected": 31.789836883544922,
+ "logps/chosen": -0.8085753321647644,
+ "logps/rejected": -0.8331602215766907,
+ "loss": 1.2863,
+ "nll_loss": 1.2334020137786865,
+ "rewards/accuracies": 0.503125011920929,
+ "rewards/chosen": -0.0808575376868248,
+ "rewards/margins": 0.0024584876373410225,
+ "rewards/rejected": -0.08331602811813354,
+ "step": 170
+ },
+ {
+ "epoch": 1.1501597444089458,
+ "grad_norm": 0.5317464882215207,
+ "learning_rate": 1.3655410366448499e-06,
+ "log_odds_chosen": 0.10975570976734161,
+ "log_odds_ratio": -0.6656275987625122,
+ "logits/chosen": 32.76184844970703,
+ "logits/rejected": 32.081581115722656,
+ "logps/chosen": -0.7589792013168335,
+ "logps/rejected": -0.8160526156425476,
+ "loss": 1.2451,
+ "nll_loss": 1.1476625204086304,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.0758979320526123,
+ "rewards/margins": 0.0057073310017585754,
+ "rewards/rejected": -0.08160526305437088,
+ "step": 180
+ },
+ {
+ "epoch": 1.2140575079872205,
+ "grad_norm": 0.5186911330716555,
+ "learning_rate": 1.199115876325091e-06,
+ "log_odds_chosen": 0.04539128392934799,
+ "log_odds_ratio": -0.6990563869476318,
+ "logits/chosen": 33.35085678100586,
+ "logits/rejected": 33.3908805847168,
+ "logps/chosen": -0.7675826549530029,
+ "logps/rejected": -0.7923057675361633,
+ "loss": 1.2217,
+ "nll_loss": 1.161102294921875,
+ "rewards/accuracies": 0.53125,
+ "rewards/chosen": -0.07675826549530029,
+ "rewards/margins": 0.0024723131209611893,
+ "rewards/rejected": -0.07923058420419693,
+ "step": 190
+ },
+ {
+ "epoch": 1.2779552715654952,
+ "grad_norm": 0.5411315465201,
+ "learning_rate": 1.036474508437579e-06,
+ "log_odds_chosen": 0.07649532705545425,
+ "log_odds_ratio": -0.6858269572257996,
+ "logits/chosen": 30.826059341430664,
+ "logits/rejected": 30.824636459350586,
+ "logps/chosen": -0.786870002746582,
+ "logps/rejected": -0.8327391743659973,
+ "loss": 1.215,
+ "nll_loss": 1.165637493133545,
+ "rewards/accuracies": 0.578125,
+ "rewards/chosen": -0.07868699729442596,
+ "rewards/margins": 0.004586914554238319,
+ "rewards/rejected": -0.08327391743659973,
+ "step": 200
+ },
+ {
+ "epoch": 1.34185303514377,
+ "grad_norm": 0.5587119516125117,
+ "learning_rate": 8.796622425502193e-07,
+ "log_odds_chosen": 0.04922567307949066,
+ "log_odds_ratio": -0.6928130984306335,
+ "logits/chosen": 32.06999969482422,
+ "logits/rejected": 31.100805282592773,
+ "logps/chosen": -0.760046124458313,
+ "logps/rejected": -0.7879078388214111,
+ "loss": 1.2018,
+ "nll_loss": 1.1223350763320923,
+ "rewards/accuracies": 0.5218750238418579,
+ "rewards/chosen": -0.07600460201501846,
+ "rewards/margins": 0.00278617930598557,
+ "rewards/rejected": -0.07879078388214111,
+ "step": 210
+ },
+ {
+ "epoch": 1.4057507987220448,
+ "grad_norm": 0.5532006875711314,
+ "learning_rate": 7.30651083891141e-07,
+ "log_odds_chosen": 0.09409201890230179,
+ "log_odds_ratio": -0.6719040870666504,
+ "logits/chosen": 31.775470733642578,
+ "logits/rejected": 31.210159301757812,
+ "logps/chosen": -0.7408252954483032,
+ "logps/rejected": -0.7944933772087097,
+ "loss": 1.1848,
+ "nll_loss": 1.1082121133804321,
+ "rewards/accuracies": 0.6000000238418579,
+ "rewards/chosen": -0.07408253848552704,
+ "rewards/margins": 0.005366811528801918,
+ "rewards/rejected": -0.07944934070110321,
+ "step": 220
+ },
+ {
+ "epoch": 1.4696485623003195,
+ "grad_norm": 0.5478964489714558,
+ "learning_rate": 5.913149342387704e-07,
+ "log_odds_chosen": 0.022008871659636497,
+ "log_odds_ratio": -0.7079219818115234,
+ "logits/chosen": 30.458974838256836,
+ "logits/rejected": 30.637874603271484,
+ "logps/chosen": -0.7661860585212708,
+ "logps/rejected": -0.783053994178772,
+ "loss": 1.1753,
+ "nll_loss": 1.1201671361923218,
+ "rewards/accuracies": 0.5406249761581421,
+ "rewards/chosen": -0.07661859691143036,
+ "rewards/margins": 0.00168679840862751,
+ "rewards/rejected": -0.07830540090799332,
+ "step": 230
+ },
+ {
+ "epoch": 1.5335463258785942,
+ "grad_norm": 0.5614636317534148,
+ "learning_rate": 4.63406026519703e-07,
+ "log_odds_chosen": 0.12717078626155853,
+ "log_odds_ratio": -0.6581142544746399,
+ "logits/chosen": 31.024799346923828,
+ "logits/rejected": 30.477502822875977,
+ "logps/chosen": -0.7121531367301941,
+ "logps/rejected": -0.7798753380775452,
+ "loss": 1.1701,
+ "nll_loss": 1.073432207107544,
+ "rewards/accuracies": 0.596875011920929,
+ "rewards/chosen": -0.07121531665325165,
+ "rewards/margins": 0.006772211752831936,
+ "rewards/rejected": -0.07798753678798676,
+ "step": 240
+ },
+ {
+ "epoch": 1.5974440894568689,
+ "grad_norm": 0.5532326330060124,
+ "learning_rate": 3.4853288946298335e-07,
+ "log_odds_chosen": 0.031235849484801292,
+ "log_odds_ratio": -0.7052245140075684,
+ "logits/chosen": 31.759140014648438,
+ "logits/rejected": 31.2039737701416,
+ "logps/chosen": -0.7514272332191467,
+ "logps/rejected": -0.7712705135345459,
+ "loss": 1.1628,
+ "nll_loss": 1.0986436605453491,
+ "rewards/accuracies": 0.518750011920929,
+ "rewards/chosen": -0.07514272630214691,
+ "rewards/margins": 0.0019843343179672956,
+ "rewards/rejected": -0.07712705433368683,
+ "step": 250
+ },
+ {
+ "epoch": 1.6613418530351438,
+ "grad_norm": 0.5579681347407102,
+ "learning_rate": 2.48140119418046e-07,
+ "log_odds_chosen": 0.09361070394515991,
+ "log_odds_ratio": -0.6759673953056335,
+ "logits/chosen": 30.127365112304688,
+ "logits/rejected": 29.955150604248047,
+ "logps/chosen": -0.7377598881721497,
+ "logps/rejected": -0.789117157459259,
+ "loss": 1.1595,
+ "nll_loss": 1.0882426500320435,
+ "rewards/accuracies": 0.581250011920929,
+ "rewards/chosen": -0.0737759917974472,
+ "rewards/margins": 0.0051357327029109,
+ "rewards/rejected": -0.07891170680522919,
+ "step": 260
+ },
+ {
+ "epoch": 1.7252396166134185,
+ "grad_norm": 0.5388500509178578,
+ "learning_rate": 1.634902137174483e-07,
+ "log_odds_chosen": 0.06861741840839386,
+ "log_odds_ratio": -0.6897167563438416,
+ "logits/chosen": 31.801860809326172,
+ "logits/rejected": 30.827133178710938,
+ "logps/chosen": -0.7297223210334778,
+ "logps/rejected": -0.7675324082374573,
+ "loss": 1.15,
+ "nll_loss": 1.091435194015503,
+ "rewards/accuracies": 0.5562499761581421,
+ "rewards/chosen": -0.07297223806381226,
+ "rewards/margins": 0.003781010629609227,
+ "rewards/rejected": -0.07675323635339737,
+ "step": 270
+ },
+ {
+ "epoch": 1.7891373801916934,
+ "grad_norm": 0.5597110218019701,
+ "learning_rate": 9.564769404039419e-08,
+ "log_odds_chosen": 0.10912013053894043,
+ "log_odds_ratio": -0.6706396341323853,
+ "logits/chosen": 30.64389419555664,
+ "logits/rejected": 30.014019012451172,
+ "logps/chosen": -0.7340375185012817,
+ "logps/rejected": -0.7948423624038696,
+ "loss": 1.1518,
+ "nll_loss": 1.0911478996276855,
+ "rewards/accuracies": 0.590624988079071,
+ "rewards/chosen": -0.0734037533402443,
+ "rewards/margins": 0.00608047703281045,
+ "rewards/rejected": -0.0794842392206192,
+ "step": 280
+ },
+ {
+ "epoch": 1.8530351437699681,
+ "grad_norm": 0.5432963139659418,
+ "learning_rate": 4.546571943496969e-08,
+ "log_odds_chosen": 0.071634940803051,
+ "log_odds_ratio": -0.6880494356155396,
+ "logits/chosen": 31.523670196533203,
+ "logits/rejected": 31.051345825195312,
+ "logps/chosen": -0.7210476398468018,
+ "logps/rejected": -0.7614242434501648,
+ "loss": 1.1457,
+ "nll_loss": 1.078147292137146,
+ "rewards/accuracies": 0.53125,
+ "rewards/chosen": -0.07210476696491241,
+ "rewards/margins": 0.0040376619435846806,
+ "rewards/rejected": -0.07614242285490036,
+ "step": 290
+ },
+ {
+ "epoch": 1.9169329073482428,
+ "grad_norm": 0.5550233900085839,
+ "learning_rate": 1.357535734809795e-08,
+ "log_odds_chosen": 0.08968226611614227,
+ "log_odds_ratio": -0.6760362386703491,
+ "logits/chosen": 30.686954498291016,
+ "logits/rejected": 29.28194236755371,
+ "logps/chosen": -0.7248884439468384,
+ "logps/rejected": -0.7753577828407288,
+ "loss": 1.1474,
+ "nll_loss": 1.0696710348129272,
+ "rewards/accuracies": 0.5531250238418579,
+ "rewards/chosen": -0.07248884439468384,
+ "rewards/margins": 0.005046932026743889,
+ "rewards/rejected": -0.07753578573465347,
+ "step": 300
+ },
+ {
+ "epoch": 1.9808306709265175,
+ "grad_norm": 0.49758643306678957,
+ "learning_rate": 3.77647586240204e-10,
+ "log_odds_chosen": 0.048125725239515305,
+ "log_odds_ratio": -0.6949858665466309,
+ "logits/chosen": 30.8554744720459,
+ "logits/rejected": 30.65484046936035,
+ "logps/chosen": -0.7389064431190491,
+ "logps/rejected": -0.7689411640167236,
+ "loss": 1.1435,
+ "nll_loss": 1.0751014947891235,
+ "rewards/accuracies": 0.534375011920929,
+ "rewards/chosen": -0.07389064878225327,
+ "rewards/margins": 0.0030034759547561407,
+ "rewards/rejected": -0.0768941268324852,
+ "step": 310
+ },
+ {
+ "epoch": 1.9936102236421727,
+ "step": 312,
+ "total_flos": 0.0,
+ "train_loss": 1.3676127867820935,
+ "train_runtime": 5524.9246,
+ "train_samples_per_second": 7.24,
+ "train_steps_per_second": 0.056
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 312,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
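
trainer_state.json carries the per-interval ORPO metrics (loss, nll_loss, rewards/accuracies, and so on) in log_history, logged every 10 steps. A minimal sketch of reading a metric back out of this file, assuming it sits in the current working directory:

```python
# Minimal sketch: read the logged training curve back out of trainer_state.json.
# Assumes the file is in the current directory; adjust the path as needed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries (the final summary entry has no "loss").
history = [entry for entry in state["log_history"] if "loss" in entry]

for entry in history:
    print(f'step {entry["step"]:4d}  loss {entry["loss"]:.4f}  '
          f'reward acc {entry["rewards/accuracies"]:.3f}')
```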