BraylonDash committed

Commit f36a8d3 (1 parent: 7827738)

Model save

README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: microsoft/phi-2
+ model-index:
+ - name: phi-2-gpo-test-longest-iter-v1-3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # phi-2-gpo-test-longest-iter-v1-3
+
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unspecified dataset.
+ It achieves the following results on the evaluation set (the reported DPO reward terms are sketched after this list):
+ - Loss: 0.0020
+ - Rewards/chosen: 0.0014
+ - Rewards/rejected: 0.0014
+ - Rewards/accuracies: 0.4990
+ - Rewards/margins: -0.0000
+ - Logps/rejected: -278.4662
+ - Logps/chosen: -306.2304
+ - Logits/rejected: 0.0952
+ - Logits/chosen: -0.0017
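+
+ As a point of reference only, here is a minimal sketch of what the reward fields above mean, assuming the standard DPO formulation implemented by TRL (this card does not state it explicitly): each reward is the implicit reward of a completion $y$ for prompt $x$ under the policy $\pi_\theta$ relative to the frozen reference model $\pi_{\mathrm{ref}}$,
+
+ $$r(x, y) = \beta \left( \log \pi_\theta(y \mid x) - \log \pi_{\mathrm{ref}}(y \mid x) \right)$$
+
+ Rewards/margins is then $r(x, y_{\text{chosen}}) - r(x, y_{\text{rejected}})$, and the loss is $-\log \sigma$ of the margin. A margin near zero with accuracy near 0.5, as reported here, indicates the policy barely separates chosen from rejected responses.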
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a sketch of a matching trainer configuration follows the list):
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 4
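+
+ A minimal sketch of how these values could map onto a TRL `DPOTrainer` run (a hypothetical reconstruction: the actual training script is not part of this commit, and the dataset and LoRA settings below are placeholders):
+
+ ```python
+ from datasets import Dataset
+ from peft import LoraConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+ from trl import DPOTrainer
+
+ model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
+
+ # Tiny in-memory preference dataset, just to make the sketch self-contained.
+ train_ds = Dataset.from_dict({
+     "prompt": ["Say hello."],
+     "chosen": ["Hello!"],
+     "rejected": ["Go away."],
+ })
+
+ # Mirrors the hyperparameters listed above; the effective batch size is
+ # 4 (per device) x 4 (gradient accumulation) = 16. AdamW with the stated
+ # betas/epsilon is the TrainingArguments default optimizer.
+ args = TrainingArguments(
+     output_dir="phi-2-gpo-test-longest-iter-v1-3",
+     learning_rate=5e-6,
+     per_device_train_batch_size=4,
+     per_device_eval_batch_size=4,
+     gradient_accumulation_steps=4,
+     seed=42,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.1,
+     num_train_epochs=4,
+ )
+
+ peft_config = LoraConfig(task_type="CAUSAL_LM")  # placeholder; the actual LoRA ranks/targets are unknown
+
+ trainer = DPOTrainer(
+     model,
+     ref_model=None,  # with a peft_config, TRL derives the reference model by disabling the adapter
+     args=args,
+     train_dataset=train_ds,
+     tokenizer=tokenizer,
+     peft_config=peft_config,
+ )
+ trainer.train()
+ ```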
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.0015 | 1.6 | 100 | 0.0019 | 0.0015 | 0.0007 | 0.5150 | 0.0008 | -278.5382 | -306.2194 | 0.0982 | 0.0015 |
+ | 0.0014 | 3.2 | 200 | 0.0019 | 0.0008 | 0.0001 | 0.5195 | 0.0008 | -278.6019 | -306.2870 | 0.0902 | -0.0069 |
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
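+
+ For inference, a minimal loading sketch on top of the versions above (assuming the adapter is published at the Hub id `BraylonDash/phi-2-gpo-test-longest-iter-v1-3`, inferred from the committer and model name rather than stated in this card):
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ base = AutoModelForCausalLM.from_pretrained(
+     "microsoft/phi-2", torch_dtype=torch.float16, trust_remote_code=True
+ )
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
+
+ # Attach the LoRA adapter (adapter_model.safetensors from this commit) to the frozen base.
+ model = PeftModel.from_pretrained(base, "BraylonDash/phi-2-gpo-test-longest-iter-v1-3")
+
+ inputs = tokenizer("Question: What is direct preference optimization?\nAnswer:", return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```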
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:068e6c0b050cb6508133e9367b9bac73dcdd4968c3a36208082f963f1dc51934
+ oid sha256:556c29ba3ea71cd48413fbccd944cbce13c5c7f5e91bf1747f2f8b238dd036ae
  size 41977616
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.0017466560238972306,
+   "eval_logits/rejected": 0.09523628652095795,
+   "eval_logps/chosen": -306.2304382324219,
+   "eval_logps/rejected": -278.4661865234375,
+   "eval_loss": 0.0019584796391427517,
+   "eval_rewards/accuracies": 0.49900001287460327,
+   "eval_rewards/chosen": 0.0013964117970317602,
+   "eval_rewards/margins": -2.741074604273308e-05,
+   "eval_rewards/rejected": 0.0014238222502171993,
+   "eval_runtime": 420.8882,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 4.752,
+   "eval_steps_per_second": 1.188,
+   "train_loss": 0.0014411601897013643,
+   "train_runtime": 2755.6406,
+   "train_samples": 61135,
+   "train_samples_per_second": 1.452,
+   "train_steps_per_second": 0.09
+ }
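
The throughput fields in these results are consistent with the sample counts and runtimes; a quick check (plain arithmetic, not a script from this repository):

```python
# Consistency check of the eval throughput fields in all_results.json / eval_results.json.
eval_runtime = 420.8882   # seconds
eval_samples = 2000
eval_batch_size = 4       # per-device eval batch size from the model card

print(eval_samples / eval_runtime)                    # ~4.752 -> eval_samples_per_second
print(eval_samples / eval_batch_size / eval_runtime)  # ~1.188 -> eval_steps_per_second
```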
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.0017466560238972306,
+   "eval_logits/rejected": 0.09523628652095795,
+   "eval_logps/chosen": -306.2304382324219,
+   "eval_logps/rejected": -278.4661865234375,
+   "eval_loss": 0.0019584796391427517,
+   "eval_rewards/accuracies": 0.49900001287460327,
+   "eval_rewards/chosen": 0.0013964117970317602,
+   "eval_rewards/margins": -2.741074604273308e-05,
+   "eval_rewards/rejected": 0.0014238222502171993,
+   "eval_runtime": 420.8882,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 4.752,
+   "eval_steps_per_second": 1.188
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.97,
+   "train_loss": 0.0014411601897013643,
+   "train_runtime": 2755.6406,
+   "train_samples": 61135,
+   "train_samples_per_second": 1.452,
+   "train_steps_per_second": 0.09
+ }
trainer_state.json ADDED
@@ -0,0 +1,412 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.968,
+   "eval_steps": 100,
+   "global_step": 248,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.02,
+       "learning_rate": 2.0000000000000002e-07,
+       "logits/chosen": 0.02732202410697937,
+       "logits/rejected": 0.16736462712287903,
+       "logps/chosen": -204.44515991210938,
+       "logps/rejected": -186.30474853515625,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 2.0000000000000003e-06,
+       "logits/chosen": 0.11495557427406311,
+       "logits/rejected": 0.14849303662776947,
+       "logps/chosen": -174.2774658203125,
+       "logps/rejected": -139.304443359375,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.3958333432674408,
+       "rewards/chosen": 0.0010303932940587401,
+       "rewards/margins": 0.0013937298208475113,
+       "rewards/rejected": -0.0003633367014117539,
+       "step": 10
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 4.000000000000001e-06,
+       "logits/chosen": 0.19859905540943146,
+       "logits/rejected": 0.2755558490753174,
+       "logps/chosen": -186.06753540039062,
+       "logps/rejected": -150.23538208007812,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.39375001192092896,
+       "rewards/chosen": -0.0006829313351772726,
+       "rewards/margins": 0.0010883348295465112,
+       "rewards/rejected": -0.0017712658736854792,
+       "step": 20
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 4.993800445762451e-06,
+       "logits/chosen": 0.10206829011440277,
+       "logits/rejected": 0.09731761366128922,
+       "logps/chosen": -189.70846557617188,
+       "logps/rejected": -176.63827514648438,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4124999940395355,
+       "rewards/chosen": 0.0010909180855378509,
+       "rewards/margins": 0.0010104707907885313,
+       "rewards/rejected": 8.044719288591295e-05,
+       "step": 30
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 4.944388344834205e-06,
+       "logits/chosen": 0.21991512179374695,
+       "logits/rejected": 0.13409800827503204,
+       "logps/chosen": -178.78292846679688,
+       "logps/rejected": -151.7918243408203,
+       "loss": 0.0016,
+       "rewards/accuracies": 0.33125001192092896,
+       "rewards/chosen": -0.0007118875510059297,
+       "rewards/margins": -0.000794673920609057,
+       "rewards/rejected": 8.278638415504247e-05,
+       "step": 40
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 4.8465431931347904e-06,
+       "logits/chosen": 0.10882525146007538,
+       "logits/rejected": 0.16875343024730682,
+       "logps/chosen": -185.3433074951172,
+       "logps/rejected": -174.74209594726562,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.38749998807907104,
+       "rewards/chosen": 0.0009307868895120919,
+       "rewards/margins": 0.000729514576960355,
+       "rewards/rejected": 0.00020127242896705866,
+       "step": 50
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 4.702203692102539e-06,
+       "logits/chosen": 0.1939707249403,
+       "logits/rejected": 0.18511822819709778,
+       "logps/chosen": -194.64193725585938,
+       "logps/rejected": -159.30172729492188,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.40625,
+       "rewards/chosen": 0.0016695528756827116,
+       "rewards/margins": 8.004475239431486e-05,
+       "rewards/rejected": 0.0015895080287009478,
+       "step": 60
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 4.514229781074239e-06,
+       "logits/chosen": 0.1889864206314087,
+       "logits/rejected": 0.1525781750679016,
+       "logps/chosen": -196.69769287109375,
+       "logps/rejected": -172.0983428955078,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.40625,
+       "rewards/chosen": 0.0013059942284598947,
+       "rewards/margins": -0.0003628497361205518,
+       "rewards/rejected": 0.0016688440227881074,
+       "step": 70
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 4.286345970517195e-06,
+       "logits/chosen": 0.1526193767786026,
+       "logits/rejected": 0.13564926385879517,
+       "logps/chosen": -176.2852020263672,
+       "logps/rejected": -149.50759887695312,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4437499940395355,
+       "rewards/chosen": 0.0020406683906912804,
+       "rewards/margins": 0.0011222332250326872,
+       "rewards/rejected": 0.0009184351074509323,
+       "step": 80
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 4.023067544670082e-06,
+       "logits/chosen": 0.19515976309776306,
+       "logits/rejected": 0.16219770908355713,
+       "logps/chosen": -183.90406799316406,
+       "logps/rejected": -157.0895538330078,
+       "loss": 0.0017,
+       "rewards/accuracies": 0.375,
+       "rewards/chosen": -0.001846942352131009,
+       "rewards/margins": -0.0017863952089101076,
+       "rewards/rejected": -6.0547237808350474e-05,
+       "step": 90
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 3.7296110958116845e-06,
+       "logits/chosen": 0.16685011982917786,
+       "logits/rejected": 0.05337408185005188,
+       "logps/chosen": -177.84762573242188,
+       "logps/rejected": -152.65464782714844,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.4312500059604645,
+       "rewards/chosen": -0.0007909245905466378,
+       "rewards/margins": 0.0006749060703441501,
+       "rewards/rejected": -0.001465830602683127,
+       "step": 100
+     },
+     {
+       "epoch": 1.6,
+       "eval_logits/chosen": 0.0015224728267639875,
+       "eval_logits/rejected": 0.09820695966482162,
+       "eval_logps/chosen": -306.2193603515625,
+       "eval_logps/rejected": -278.5382080078125,
+       "eval_loss": 0.001858623931184411,
+       "eval_rewards/accuracies": 0.5149999856948853,
+       "eval_rewards/chosen": 0.0015068423235788941,
+       "eval_rewards/margins": 0.0008032902260310948,
+       "eval_rewards/rejected": 0.0007035521557554603,
+       "eval_runtime": 420.6562,
+       "eval_samples_per_second": 4.754,
+       "eval_steps_per_second": 1.189,
+       "step": 100
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 3.4117911628292944e-06,
+       "logits/chosen": 0.22267238795757294,
+       "logits/rejected": 0.19200441241264343,
+       "logps/chosen": -205.3648223876953,
+       "logps/rejected": -172.17208862304688,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.38749998807907104,
+       "rewards/chosen": -0.002072205301374197,
+       "rewards/margins": -0.0004376435244921595,
+       "rewards/rejected": -0.0016345620388165116,
+       "step": 110
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 3.075905022087675e-06,
+       "logits/chosen": 0.1819857358932495,
+       "logits/rejected": 0.23743709921836853,
+       "logps/chosen": -184.61007690429688,
+       "logps/rejected": -160.8583221435547,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.45625001192092896,
+       "rewards/chosen": 0.0009784279391169548,
+       "rewards/margins": 0.0018532350659370422,
+       "rewards/rejected": -0.000874807417858392,
+       "step": 120
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 2.728607913349464e-06,
+       "logits/chosen": 0.09743748605251312,
+       "logits/rejected": 0.11662141233682632,
+       "logps/chosen": -176.29867553710938,
+       "logps/rejected": -140.44961547851562,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.512499988079071,
+       "rewards/chosen": 0.0014981284039095044,
+       "rewards/margins": 0.0015437586698681116,
+       "rewards/rejected": -4.563046604744159e-05,
+       "step": 130
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 2.376781173017589e-06,
+       "logits/chosen": 0.03450363129377365,
+       "logits/rejected": 0.09166844189167023,
+       "logps/chosen": -191.46543884277344,
+       "logps/rejected": -154.39138793945312,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4625000059604645,
+       "rewards/chosen": 0.001418759347870946,
+       "rewards/margins": 0.0005404851399362087,
+       "rewards/rejected": 0.0008782741497270763,
+       "step": 140
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 2.0273958875043877e-06,
+       "logits/chosen": 0.15574321150779724,
+       "logits/rejected": 0.15428531169891357,
+       "logps/chosen": -179.232421875,
+       "logps/rejected": -154.8232421875,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.48124998807907104,
+       "rewards/chosen": 0.0011932613560929894,
+       "rewards/margins": 0.0013866318622604012,
+       "rewards/rejected": -0.00019337031699251384,
+       "step": 150
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 1.6873747682962393e-06,
+       "logits/chosen": 0.2517469525337219,
+       "logits/rejected": 0.1588711440563202,
+       "logps/chosen": -193.44821166992188,
+       "logps/rejected": -170.86611938476562,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.44999998807907104,
+       "rewards/chosen": -0.00030686514219269156,
+       "rewards/margins": 0.0002994650858454406,
+       "rewards/rejected": -0.0006063304608687758,
+       "step": 160
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 1.363454985517803e-06,
+       "logits/chosen": 0.20987768471240997,
+       "logits/rejected": 0.08343996852636337,
+       "logps/chosen": -187.78610229492188,
+       "logps/rejected": -167.81800842285156,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.46875,
+       "rewards/chosen": 0.002795418258756399,
+       "rewards/margins": 0.0018930940423160791,
+       "rewards/rejected": 0.0009023241582326591,
+       "step": 170
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 1.062054677808238e-06,
+       "logits/chosen": 0.23503074049949646,
+       "logits/rejected": 0.2113850861787796,
+       "logps/chosen": -186.79861450195312,
+       "logps/rejected": -162.50196838378906,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4312500059604645,
+       "rewards/chosen": 0.0017884777626022696,
+       "rewards/margins": 0.0021424146834760904,
+       "rewards/rejected": -0.0003539369790814817,
+       "step": 180
+     },
+     {
+       "epoch": 3.04,
+       "learning_rate": 7.891457834794711e-07,
+       "logits/chosen": 0.138333261013031,
+       "logits/rejected": 0.21794256567955017,
+       "logps/chosen": -179.44418334960938,
+       "logps/rejected": -158.67454528808594,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.45625001192092896,
+       "rewards/chosen": 0.0017921695252880454,
+       "rewards/margins": 0.0020220079459249973,
+       "rewards/rejected": -0.00022983844974078238,
+       "step": 190
+     },
+     {
+       "epoch": 3.2,
+       "learning_rate": 5.501357126768117e-07,
+       "logits/chosen": 0.2148284912109375,
+       "logits/rejected": 0.1433248072862625,
+       "logps/chosen": -186.4739532470703,
+       "logps/rejected": -167.48558044433594,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4437499940395355,
+       "rewards/chosen": 0.002506103366613388,
+       "rewards/margins": 0.0016806632047519088,
+       "rewards/rejected": 0.0008254402200691402,
+       "step": 200
+     },
+     {
+       "epoch": 3.2,
+       "eval_logits/chosen": -0.00685009965673089,
+       "eval_logits/rejected": 0.09018866717815399,
+       "eval_logps/chosen": -306.28704833984375,
+       "eval_logps/rejected": -278.6018981933594,
+       "eval_loss": 0.0019364985637366772,
+       "eval_rewards/accuracies": 0.5195000171661377,
+       "eval_rewards/chosen": 0.0008296637679450214,
+       "eval_rewards/margins": 0.0007632386405020952,
+       "eval_rewards/rejected": 6.642500375164673e-05,
+       "eval_runtime": 420.9095,
+       "eval_samples_per_second": 4.752,
+       "eval_steps_per_second": 1.188,
+       "step": 200
+     },
+     {
+       "epoch": 3.36,
+       "learning_rate": 3.4976020508682345e-07,
+       "logits/chosen": 0.10354860126972198,
+       "logits/rejected": 0.1740628331899643,
+       "logps/chosen": -187.9175262451172,
+       "logps/rejected": -160.59580993652344,
+       "loss": 0.0014,
+       "rewards/accuracies": 0.4749999940395355,
+       "rewards/chosen": 0.0003708422009367496,
+       "rewards/margins": 0.0009719420922920108,
+       "rewards/rejected": -0.0006010999786667526,
+       "step": 210
+     },
+     {
+       "epoch": 3.52,
+       "learning_rate": 1.9198949610721273e-07,
+       "logits/chosen": 0.14361225068569183,
+       "logits/rejected": 0.09037239849567413,
+       "logps/chosen": -183.87716674804688,
+       "logps/rejected": -157.98300170898438,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.4749999940395355,
+       "rewards/chosen": 0.0009205196984112263,
+       "rewards/margins": 0.0023204255849123,
+       "rewards/rejected": -0.0013999061193317175,
+       "step": 220
+     },
+     {
+       "epoch": 3.68,
+       "learning_rate": 7.994965069994143e-08,
+       "logits/chosen": 0.20537514984607697,
+       "logits/rejected": 0.18879783153533936,
+       "logps/chosen": -193.15943908691406,
+       "logps/rejected": -162.2894744873047,
+       "loss": 0.0015,
+       "rewards/accuracies": 0.39375001192092896,
+       "rewards/chosen": -0.00020981582929380238,
+       "rewards/margins": 0.0006767899030819535,
+       "rewards/rejected": -0.0008866057032719254,
+       "step": 230
+     },
+     {
+       "epoch": 3.84,
+       "learning_rate": 1.5860623616664183e-08,
+       "logits/chosen": 0.13774822652339935,
+       "logits/rejected": 0.09208400547504425,
+       "logps/chosen": -189.96868896484375,
+       "logps/rejected": -159.98782348632812,
+       "loss": 0.0016,
+       "rewards/accuracies": 0.34375,
+       "rewards/chosen": -6.72071473672986e-06,
+       "rewards/margins": -0.0008294621366076171,
+       "rewards/rejected": 0.0008227415382862091,
+       "step": 240
+     },
+     {
+       "epoch": 3.97,
+       "step": 248,
+       "total_flos": 0.0,
+       "train_loss": 0.0014411601897013643,
+       "train_runtime": 2755.6406,
+       "train_samples_per_second": 1.452,
+       "train_steps_per_second": 0.09
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 248,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 100,
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
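
Because `trainer_state.json` stores the full `log_history`, the loss curves summarized in the model card can be recovered from it directly; a minimal parsing sketch (assumes the file has been downloaded locally):

```python
import json

# Load the state file written by transformers.Trainer at the end of training.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training steps log "loss"; the periodic evaluations log "eval_loss".
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_curve[:3])  # [(1, 0.0014), (10, 0.0014), (20, 0.0014)]
print(eval_curve)       # two eval points, at steps 100 and 200
```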