RikkiXu committed on
Commit 9c61062
1 Parent(s): 5ae4bb7

Model save
README.md CHANGED
@@ -16,14 +16,14 @@ should probably proofread and complete it, then remove this comment. -->
  This model was trained from scratch on the None dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.6931
- - Rewards/chosen: -1.6342
- - Rewards/rejected: -1.6342
+ - Rewards/chosen: -8.4881
+ - Rewards/rejected: -8.4881
  - Rewards/accuracies: 0.0
  - Rewards/margins: 0.0
- - Logps/rejected: -158.8454
- - Logps/chosen: -158.8454
- - Logits/rejected: -3.2278
- - Logits/chosen: -3.2278
+ - Logps/rejected: -164.0651
+ - Logps/chosen: -164.0651
+ - Logits/rejected: -3.2224
+ - Logits/chosen: -3.2224

  ## Model description

@@ -42,7 +42,7 @@ More information needed
  ### Training hyperparameters

  The following hyperparameters were used during training:
- - learning_rate: 5e-08
+ - learning_rate: 5e-07
  - train_batch_size: 8
  - eval_batch_size: 8
  - seed: 42
@@ -53,20 +53,21 @@ The following hyperparameters were used during training:
  - total_eval_batch_size: 64
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
  - num_epochs: 1

  ### Training results

- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
- |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.3253 | 0.2857 | 100 | 0.6931 | -1.6970 | -1.6970 | 0.0 | 0.0 | -158.9711 | -158.9711 | -3.2366 | -3.2366 |
- | 0.3071 | 0.5714 | 200 | 0.6931 | -2.0914 | -2.0914 | 0.0 | 0.0 | -159.7597 | -159.7597 | -3.2287 | -3.2287 |
- | 0.3336 | 0.8571 | 300 | 0.6931 | -1.6342 | -1.6342 | 0.0 | 0.0 | -158.8454 | -158.8454 | -3.2278 | -3.2278 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.7723 | 0.29 | 100 | 0.6931 | -2.0247 | -2.0247 | 0.0 | 0.0 | -157.6017 | -157.6017 | -3.1472 | -3.1472 |
+ | 0.7717 | 0.57 | 200 | 0.6931 | -12.0830 | -12.0830 | 0.0 | 0.0 | -167.6601 | -167.6601 | -3.1635 | -3.1635 |
+ | 0.782 | 0.86 | 300 | 0.6931 | -8.4881 | -8.4881 | 0.0 | 0.0 | -164.0651 | -164.0651 | -3.2224 | -3.2224 |


  ### Framework versions

- - Transformers 4.41.1
+ - Transformers 4.38.2
  - Pytorch 2.1.2+cu118
  - Datasets 2.16.1
- - Tokenizers 0.19.1
+ - Tokenizers 0.15.2
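
The hyperparameters listed in the updated model card map directly onto `transformers.TrainingArguments`. The sketch below is a hypothetical reconstruction for illustration only (the training script itself is not part of this commit): the output directory is made up, and the preference-optimization setup implied by the Rewards/Logps metrics is an assumption rather than something this repo states.

```python
# Minimal sketch, NOT the authors' actual script: how the README hyperparameters
# could be expressed with transformers.TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./dpo-run",          # hypothetical path
    learning_rate=5e-07,             # raised from 5e-08 in this commit
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,                # lr_scheduler_warmup_ratio added in this commit
    num_train_epochs=1,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    bf16=True,                       # assumption; config.json reports torch_dtype bfloat16
    evaluation_strategy="steps",
    eval_steps=100,                  # trainer_state.json logs an eval entry every 100 steps
    save_steps=100,
    logging_steps=10,
)
```

Note that the only hyperparameter values changed by this commit are the learning rate (5e-08 to 5e-07) and the newly listed warmup ratio of 0.1.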
all_results.json CHANGED
@@ -1,9 +1,8 @@
  {
      "epoch": 1.0,
-     "total_flos": 0.0,
-     "train_loss": 0.34604193687438967,
-     "train_runtime": 5793.0735,
+     "train_loss": 0.7428068714482444,
+     "train_runtime": 5299.044,
      "train_samples": 44755,
-     "train_samples_per_second": 7.726,
-     "train_steps_per_second": 0.06
+     "train_samples_per_second": 8.446,
+     "train_steps_per_second": 0.066
  }
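
The throughput fields in `all_results.json` (and the identical `train_results.json` below) are derived quantities: samples or optimizer steps divided by `train_runtime`. A quick consistency check against the 350 total steps reported at the end of `trainer_state.json`:

```python
# Verify that the reported throughput numbers follow from runtime and counts.
train_samples, train_runtime, total_steps = 44755, 5299.044, 350

print(round(train_samples / train_runtime, 3))  # 8.446 -> train_samples_per_second
print(round(total_steps / train_runtime, 3))    # 0.066 -> train_steps_per_second
```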
config.json CHANGED
@@ -20,7 +20,7 @@
      "sliding_window": 4096,
      "tie_word_embeddings": false,
      "torch_dtype": "bfloat16",
-     "transformers_version": "4.41.1",
+     "transformers_version": "4.38.2",
      "use_cache": false,
      "vocab_size": 32002
  }
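
If you have a local clone of the repo, the fields shown in this `config.json` hunk can be read back through `transformers.AutoConfig`. A small sketch; the local path is an assumption:

```python
# Illustrative check of the config fields shown in the diff above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")  # path to a local clone of this repo (assumption)
print(cfg.transformers_version)        # "4.38.2" after this commit
print(cfg.vocab_size)                  # 32002
print(cfg.sliding_window)              # 4096
print(cfg.torch_dtype)                 # bfloat16 (string or torch.dtype depending on version)
```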
generation_config.json CHANGED
@@ -2,5 +2,5 @@
      "_from_model_config": true,
      "bos_token_id": 1,
      "eos_token_id": 32000,
-     "transformers_version": "4.41.1"
+     "transformers_version": "4.38.2"
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5620686540e3b69a8dff87d7dbb09524fd94d7f2126dc65a769fe4d670bd4a90
+ oid sha256:d8a16dcdeac389a7edb934e74708a95a01cd1e632e8f90a73780fef9e4ba9a79
  size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0e57f83d8c347274f3c77cf169473468937aa6cee843f09e0709b1c3248e0fb8
+ oid sha256:830c15b3f985c15ad7fd7fed750f2e2465dfc40ee9aa0da31053db52e903da60
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:639c1f7e701b304b9eb6019aefbf2f2172c8a61237b358c0f6da984843c038be
+ oid sha256:a9abe98aec4f8efa4737df5455a2ed808d0f8ebad162d17a1674f84f71648f8d
  size 4540532728
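
The three `model-*-of-00003.safetensors` entries are Git LFS pointer files: the `oid sha256:` and `size` lines describe the actual shard stored in LFS, so a downloaded shard can be verified against them. A minimal sketch, assuming the first shard has been saved locally under its original name:

```python
# Verify a downloaded shard against its Git LFS pointer (new values from this commit).
import hashlib
import os

path = "model-00001-of-00003.safetensors"  # local file path is an assumption
expected_sha256 = "d8a16dcdeac389a7edb934e74708a95a01cd1e632e8f90a73780fef9e4ba9a79"
expected_size = 4943178720

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("shard matches its LFS pointer")
```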
runs/May31_01-40-50_n136-100-194/events.out.tfevents.1717090981.n136-100-194.1984510.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:99bc78ded1b39fbe01859f8d995f1b0573567e7d45edc6350ee4a1fa2d31ce95
- size 28312
+ oid sha256:ec8a38bec5df134d51ff42aa4b854f577d10febb82aa267f83550844de67a812
+ size 32106
tokenizer.json CHANGED
@@ -152,7 +152,6 @@
      "end_of_word_suffix": null,
      "fuse_unk": true,
      "byte_fallback": true,
-     "ignore_merges": false,
      "vocab": {
        "<unk>": 0,
        "<s>": 1,
train_results.json CHANGED
@@ -1,9 +1,8 @@
  {
      "epoch": 1.0,
-     "total_flos": 0.0,
-     "train_loss": 0.34604193687438967,
-     "train_runtime": 5793.0735,
+     "train_loss": 0.7428068714482444,
+     "train_runtime": 5299.044,
      "train_samples": 44755,
-     "train_samples_per_second": 7.726,
-     "train_steps_per_second": 0.06
+     "train_samples_per_second": 8.446,
+     "train_steps_per_second": 0.066
  }
trainer_state.json CHANGED
@@ -9,14 +9,14 @@
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.002857142857142857,
13
- "grad_norm": 1533.819121390321,
14
- "learning_rate": 4.9998992904271775e-08,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
18
  "logps/rejected": -205.8054962158203,
19
- "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
@@ -24,586 +24,586 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.02857142857142857,
28
- "grad_norm": 1668.215158817105,
29
- "learning_rate": 4.9899357349880975e-08,
30
- "logits/chosen": -4.21472692489624,
31
- "logits/rejected": -4.488520622253418,
32
- "logps/chosen": -318.4333801269531,
33
- "logps/rejected": -257.2440185546875,
34
- "loss": 0.6858,
35
- "rewards/accuracies": 0.4930555522441864,
36
- "rewards/chosen": 0.03888694569468498,
37
- "rewards/margins": 0.020132217556238174,
38
- "rewards/rejected": 0.018754728138446808,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.05714285714285714,
43
- "grad_norm": 1345.3140295690048,
44
- "learning_rate": 4.959823971496574e-08,
45
- "logits/chosen": -4.247828006744385,
46
- "logits/rejected": -4.502226829528809,
47
- "logps/chosen": -304.4950866699219,
48
- "logps/rejected": -244.0281219482422,
49
- "loss": 0.6107,
50
- "rewards/accuracies": 0.7250000238418579,
51
- "rewards/chosen": 0.37075769901275635,
52
- "rewards/margins": 0.27177533507347107,
53
- "rewards/rejected": 0.09898237138986588,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.08571428571428572,
58
- "grad_norm": 1125.2250514399805,
59
- "learning_rate": 4.9099071517396326e-08,
60
- "logits/chosen": -4.306983947753906,
61
- "logits/rejected": -4.5679826736450195,
62
- "logps/chosen": -304.89337158203125,
63
- "logps/rejected": -258.8996887207031,
64
- "loss": 0.5148,
65
- "rewards/accuracies": 0.78125,
66
- "rewards/chosen": 0.8106807470321655,
67
- "rewards/margins": 0.5930811762809753,
68
- "rewards/rejected": 0.21759963035583496,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.11428571428571428,
73
- "grad_norm": 1094.2742476604449,
74
- "learning_rate": 4.8405871765993426e-08,
75
- "logits/chosen": -4.310123443603516,
76
- "logits/rejected": -4.577700614929199,
77
- "logps/chosen": -292.9500427246094,
78
- "logps/rejected": -234.384765625,
79
- "loss": 0.4433,
80
- "rewards/accuracies": 0.7749999761581421,
81
- "rewards/chosen": 1.2921748161315918,
82
- "rewards/margins": 0.8818836212158203,
83
- "rewards/rejected": 0.41029104590415955,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.14285714285714285,
88
- "grad_norm": 866.0178113474443,
89
- "learning_rate": 4.7524221697560474e-08,
90
- "logits/chosen": -4.307633876800537,
91
- "logits/rejected": -4.553541660308838,
92
- "logps/chosen": -298.9430847167969,
93
- "logps/rejected": -252.4582977294922,
94
- "loss": 0.3906,
95
- "rewards/accuracies": 0.8374999761581421,
96
- "rewards/chosen": 1.8912757635116577,
97
- "rewards/margins": 1.240208387374878,
98
- "rewards/rejected": 0.6510674357414246,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.17142857142857143,
103
- "grad_norm": 837.0640990054532,
104
- "learning_rate": 4.646121984004665e-08,
105
- "logits/chosen": -4.309387683868408,
106
- "logits/rejected": -4.5390214920043945,
107
- "logps/chosen": -307.09820556640625,
108
- "logps/rejected": -260.90191650390625,
109
- "loss": 0.3814,
110
- "rewards/accuracies": 0.8125,
111
- "rewards/chosen": 2.126704216003418,
112
- "rewards/margins": 1.2716383934020996,
113
- "rewards/rejected": 0.8550659418106079,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
- "grad_norm": 985.5848045316892,
119
- "learning_rate": 4.522542485937369e-08,
120
- "logits/chosen": -4.4262213706970215,
121
- "logits/rejected": -4.558186054229736,
122
- "logps/chosen": -284.1617736816406,
123
- "logps/rejected": -235.8411102294922,
124
- "loss": 0.3695,
125
- "rewards/accuracies": 0.8187500238418579,
126
- "rewards/chosen": 2.3711953163146973,
127
- "rewards/margins": 1.6806213855743408,
128
- "rewards/rejected": 0.690574049949646,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.22857142857142856,
133
- "grad_norm": 951.7016793785169,
134
- "learning_rate": 4.3826786650090273e-08,
135
- "logits/chosen": -4.280414581298828,
136
- "logits/rejected": -4.534539222717285,
137
- "logps/chosen": -290.68597412109375,
138
- "logps/rejected": -239.3174591064453,
139
- "loss": 0.3466,
140
- "rewards/accuracies": 0.8687499761581421,
141
- "rewards/chosen": 2.500866413116455,
142
- "rewards/margins": 1.933850646018982,
143
- "rewards/rejected": 0.5670154094696045,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.2571428571428571,
148
- "grad_norm": 868.4695107443264,
149
- "learning_rate": 4.2276566224671614e-08,
150
- "logits/chosen": -4.205895900726318,
151
- "logits/rejected": -4.440802574157715,
152
- "logps/chosen": -301.97674560546875,
153
- "logps/rejected": -257.63153076171875,
154
- "loss": 0.3457,
155
- "rewards/accuracies": 0.7875000238418579,
156
- "rewards/chosen": 2.7932560443878174,
157
- "rewards/margins": 1.9473575353622437,
158
- "rewards/rejected": 0.8458986282348633,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.2857142857142857,
163
- "grad_norm": 1324.6980610698608,
164
- "learning_rate": 4.058724504646834e-08,
165
- "logits/chosen": -4.310187816619873,
166
- "logits/rejected": -4.531655311584473,
167
- "logps/chosen": -290.22113037109375,
168
- "logps/rejected": -240.4936981201172,
169
- "loss": 0.3253,
170
- "rewards/accuracies": 0.856249988079071,
171
- "rewards/chosen": 2.803382396697998,
172
- "rewards/margins": 1.9496362209320068,
173
- "rewards/rejected": 0.8537459373474121,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.2857142857142857,
178
- "eval_logits/chosen": -3.2365729808807373,
179
- "eval_logits/rejected": -3.2365729808807373,
180
- "eval_logps/chosen": -158.97109985351562,
181
- "eval_logps/rejected": -158.97109985351562,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
- "eval_rewards/chosen": -1.697042465209961,
185
  "eval_rewards/margins": 0.0,
186
- "eval_rewards/rejected": -1.697042465209961,
187
- "eval_runtime": 1.5274,
188
- "eval_samples_per_second": 0.655,
189
- "eval_steps_per_second": 0.655,
190
  "step": 100
191
  },
192
  {
193
- "epoch": 0.3142857142857143,
194
- "grad_norm": 904.1535779728154,
195
- "learning_rate": 3.8772424536302564e-08,
196
- "logits/chosen": -4.3289313316345215,
197
- "logits/rejected": -4.571288108825684,
198
- "logps/chosen": -297.5599060058594,
199
- "logps/rejected": -249.8257293701172,
200
- "loss": 0.345,
201
- "rewards/accuracies": 0.893750011920929,
202
- "rewards/chosen": 2.935572385787964,
203
- "rewards/margins": 2.1961002349853516,
204
- "rewards/rejected": 0.7394723892211914,
205
  "step": 110
206
  },
207
  {
208
- "epoch": 0.34285714285714286,
209
- "grad_norm": 1086.0405455439977,
210
- "learning_rate": 3.6846716561824964e-08,
211
- "logits/chosen": -4.373476982116699,
212
- "logits/rejected": -4.619527816772461,
213
- "logps/chosen": -287.13385009765625,
214
- "logps/rejected": -237.6899871826172,
215
- "loss": 0.3263,
216
- "rewards/accuracies": 0.824999988079071,
217
- "rewards/chosen": 2.900177478790283,
218
- "rewards/margins": 2.18641996383667,
219
- "rewards/rejected": 0.7137576937675476,
220
  "step": 120
221
  },
222
  {
223
- "epoch": 0.37142857142857144,
224
- "grad_norm": 753.3076911412429,
225
- "learning_rate": 3.482562579134809e-08,
226
- "logits/chosen": -4.375483512878418,
227
- "logits/rejected": -4.624612331390381,
228
- "logps/chosen": -276.9853515625,
229
- "logps/rejected": -218.39633178710938,
230
- "loss": 0.3228,
231
- "rewards/accuracies": 0.8500000238418579,
232
- "rewards/chosen": 2.8573689460754395,
233
- "rewards/margins": 2.0853238105773926,
234
- "rewards/rejected": 0.7720457315444946,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
- "grad_norm": 840.6908677183937,
240
- "learning_rate": 3.272542485937369e-08,
241
- "logits/chosen": -4.290375232696533,
242
- "logits/rejected": -4.607089996337891,
243
- "logps/chosen": -294.1933288574219,
244
- "logps/rejected": -239.8271942138672,
245
- "loss": 0.3428,
246
- "rewards/accuracies": 0.875,
247
- "rewards/chosen": 2.845167875289917,
248
- "rewards/margins": 2.3569939136505127,
249
- "rewards/rejected": 0.48817411065101624,
250
  "step": 140
251
  },
252
  {
253
- "epoch": 0.42857142857142855,
254
- "grad_norm": 1010.0880176318159,
255
- "learning_rate": 3.056302334890786e-08,
256
- "logits/chosen": -4.25304651260376,
257
- "logits/rejected": -4.517129421234131,
258
- "logps/chosen": -293.35797119140625,
259
- "logps/rejected": -250.48641967773438,
260
- "loss": 0.313,
261
- "rewards/accuracies": 0.8374999761581421,
262
- "rewards/chosen": 3.026089906692505,
263
- "rewards/margins": 2.4186887741088867,
264
- "rewards/rejected": 0.6074013113975525,
265
  "step": 150
266
  },
267
  {
268
- "epoch": 0.45714285714285713,
269
- "grad_norm": 881.3705397776472,
270
- "learning_rate": 2.8355831645441387e-08,
271
- "logits/chosen": -4.279029369354248,
272
- "logits/rejected": -4.569698810577393,
273
- "logps/chosen": -294.6650695800781,
274
- "logps/rejected": -235.29067993164062,
275
- "loss": 0.338,
276
- "rewards/accuracies": 0.8374999761581421,
277
- "rewards/chosen": 3.13142728805542,
278
- "rewards/margins": 2.386794090270996,
279
- "rewards/rejected": 0.7446335554122925,
280
  "step": 160
281
  },
282
  {
283
- "epoch": 0.4857142857142857,
284
- "grad_norm": 857.1741181734847,
285
- "learning_rate": 2.6121620758762875e-08,
286
- "logits/chosen": -4.228262901306152,
287
- "logits/rejected": -4.463376522064209,
288
- "logps/chosen": -294.2881774902344,
289
- "logps/rejected": -241.1619415283203,
290
- "loss": 0.3264,
291
- "rewards/accuracies": 0.875,
292
- "rewards/chosen": 3.1862380504608154,
293
- "rewards/margins": 2.542022228240967,
294
- "rewards/rejected": 0.6442161202430725,
295
  "step": 170
296
  },
297
  {
298
- "epoch": 0.5142857142857142,
299
- "grad_norm": 923.726950031985,
300
- "learning_rate": 2.3878379241237133e-08,
301
- "logits/chosen": -4.36321496963501,
302
- "logits/rejected": -4.595080375671387,
303
- "logps/chosen": -283.4067687988281,
304
- "logps/rejected": -241.1226043701172,
305
- "loss": 0.3098,
306
- "rewards/accuracies": 0.8374999761581421,
307
- "rewards/chosen": 3.2351880073547363,
308
- "rewards/margins": 2.5559356212615967,
309
- "rewards/rejected": 0.679252028465271,
310
  "step": 180
311
  },
312
  {
313
- "epoch": 0.5428571428571428,
314
- "grad_norm": 792.8076625116224,
315
- "learning_rate": 2.164416835455862e-08,
316
- "logits/chosen": -4.323281288146973,
317
- "logits/rejected": -4.492599964141846,
318
- "logps/chosen": -305.88592529296875,
319
- "logps/rejected": -257.3919982910156,
320
- "loss": 0.2759,
321
- "rewards/accuracies": 0.893750011920929,
322
- "rewards/chosen": 3.2042744159698486,
323
- "rewards/margins": 2.557298421859741,
324
- "rewards/rejected": 0.6469758152961731,
325
  "step": 190
326
  },
327
  {
328
- "epoch": 0.5714285714285714,
329
- "grad_norm": 757.2657183486399,
330
- "learning_rate": 1.943697665109214e-08,
331
- "logits/chosen": -4.354622840881348,
332
- "logits/rejected": -4.594660758972168,
333
- "logps/chosen": -290.7183532714844,
334
- "logps/rejected": -249.0703582763672,
335
- "loss": 0.3071,
336
  "rewards/accuracies": 0.856249988079071,
337
- "rewards/chosen": 3.2066097259521484,
338
- "rewards/margins": 2.334256649017334,
339
- "rewards/rejected": 0.8723530769348145,
340
  "step": 200
341
  },
342
  {
343
- "epoch": 0.5714285714285714,
344
- "eval_logits/chosen": -3.2287425994873047,
345
- "eval_logits/rejected": -3.2287425994873047,
346
- "eval_logps/chosen": -159.75973510742188,
347
- "eval_logps/rejected": -159.75973510742188,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
- "eval_rewards/chosen": -2.0913619995117188,
351
  "eval_rewards/margins": 0.0,
352
- "eval_rewards/rejected": -2.0913619995117188,
353
- "eval_runtime": 1.503,
354
- "eval_samples_per_second": 0.665,
355
- "eval_steps_per_second": 0.665,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
- "grad_norm": 891.2229023091579,
361
- "learning_rate": 1.7274575140626317e-08,
362
- "logits/chosen": -4.2898783683776855,
363
- "logits/rejected": -4.5833001136779785,
364
- "logps/chosen": -304.8259582519531,
365
- "logps/rejected": -254.60757446289062,
366
- "loss": 0.3024,
367
  "rewards/accuracies": 0.8687499761581421,
368
- "rewards/chosen": 3.1973044872283936,
369
- "rewards/margins": 2.722479820251465,
370
- "rewards/rejected": 0.47482460737228394,
371
  "step": 210
372
  },
373
  {
374
- "epoch": 0.6285714285714286,
375
- "grad_norm": 938.9689090425716,
376
- "learning_rate": 1.517437420865191e-08,
377
- "logits/chosen": -4.243287086486816,
378
- "logits/rejected": -4.58882999420166,
379
- "logps/chosen": -294.97943115234375,
380
- "logps/rejected": -225.01730346679688,
381
- "loss": 0.2916,
382
- "rewards/accuracies": 0.856249988079071,
383
- "rewards/chosen": 3.2672336101531982,
384
- "rewards/margins": 2.979445695877075,
385
- "rewards/rejected": 0.2877880036830902,
386
  "step": 220
387
  },
388
  {
389
- "epoch": 0.6571428571428571,
390
- "grad_norm": 889.4233945739674,
391
- "learning_rate": 1.3153283438175034e-08,
392
- "logits/chosen": -4.367541313171387,
393
- "logits/rejected": -4.557114601135254,
394
- "logps/chosen": -279.52362060546875,
395
- "logps/rejected": -226.0491180419922,
396
- "loss": 0.2804,
397
  "rewards/accuracies": 0.862500011920929,
398
- "rewards/chosen": 2.717285633087158,
399
- "rewards/margins": 2.2621235847473145,
400
- "rewards/rejected": 0.45516151189804077,
401
  "step": 230
402
  },
403
  {
404
- "epoch": 0.6857142857142857,
405
- "grad_norm": 764.4075642697645,
406
- "learning_rate": 1.1227575463697438e-08,
407
- "logits/chosen": -4.391533851623535,
408
- "logits/rejected": -4.709025859832764,
409
- "logps/chosen": -256.52532958984375,
410
- "logps/rejected": -215.2234649658203,
411
- "loss": 0.287,
412
- "rewards/accuracies": 0.8500000238418579,
413
- "rewards/chosen": 2.9945430755615234,
414
- "rewards/margins": 2.5624358654022217,
415
- "rewards/rejected": 0.4321066439151764,
416
  "step": 240
417
  },
418
  {
419
- "epoch": 0.7142857142857143,
420
- "grad_norm": 1078.8910476393369,
421
- "learning_rate": 9.412754953531663e-09,
422
- "logits/chosen": -4.338193416595459,
423
- "logits/rejected": -4.612923622131348,
424
- "logps/chosen": -276.8057861328125,
425
- "logps/rejected": -232.43807983398438,
426
- "loss": 0.2923,
427
- "rewards/accuracies": 0.875,
428
- "rewards/chosen": 2.8848717212677,
429
- "rewards/margins": 2.400455951690674,
430
- "rewards/rejected": 0.4844156801700592,
431
  "step": 250
432
  },
433
  {
434
- "epoch": 0.7428571428571429,
435
- "grad_norm": 888.6230207618648,
436
- "learning_rate": 7.723433775328384e-09,
437
- "logits/chosen": -4.384097099304199,
438
- "logits/rejected": -4.630164623260498,
439
- "logps/chosen": -269.947265625,
440
- "logps/rejected": -240.2582244873047,
441
- "loss": 0.2917,
442
  "rewards/accuracies": 0.8687499761581421,
443
- "rewards/chosen": 2.7555510997772217,
444
- "rewards/margins": 2.3154397010803223,
445
- "rewards/rejected": 0.4401116967201233,
446
  "step": 260
447
  },
448
  {
449
- "epoch": 0.7714285714285715,
450
- "grad_norm": 839.6114448931011,
451
- "learning_rate": 6.173213349909728e-09,
452
- "logits/chosen": -4.516074180603027,
453
- "logits/rejected": -4.6834282875061035,
454
- "logps/chosen": -271.71820068359375,
455
- "logps/rejected": -227.9601287841797,
456
- "loss": 0.3199,
457
- "rewards/accuracies": 0.793749988079071,
458
- "rewards/chosen": 2.6797890663146973,
459
- "rewards/margins": 2.0464982986450195,
460
- "rewards/rejected": 0.6332908868789673,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
- "grad_norm": 794.9828383467956,
466
- "learning_rate": 4.7745751406263165e-09,
467
- "logits/chosen": -4.297934532165527,
468
- "logits/rejected": -4.586764335632324,
469
- "logps/chosen": -272.8429870605469,
470
- "logps/rejected": -229.56021118164062,
471
- "loss": 0.2842,
472
- "rewards/accuracies": 0.893750011920929,
473
- "rewards/chosen": 3.1210646629333496,
474
- "rewards/margins": 2.520183801651001,
475
- "rewards/rejected": 0.6008811593055725,
476
  "step": 280
477
  },
478
  {
479
- "epoch": 0.8285714285714286,
480
- "grad_norm": 1122.8170349728516,
481
- "learning_rate": 3.5387801599533474e-09,
482
- "logits/chosen": -4.3172149658203125,
483
- "logits/rejected": -4.5055928230285645,
484
- "logps/chosen": -280.2183837890625,
485
- "logps/rejected": -236.1828155517578,
486
- "loss": 0.3083,
487
- "rewards/accuracies": 0.8500000238418579,
488
- "rewards/chosen": 3.173701286315918,
489
- "rewards/margins": 2.5917415618896484,
490
- "rewards/rejected": 0.5819598436355591,
491
  "step": 290
492
  },
493
  {
494
- "epoch": 0.8571428571428571,
495
- "grad_norm": 746.1081306618772,
496
- "learning_rate": 2.475778302439524e-09,
497
- "logits/chosen": -4.29483699798584,
498
- "logits/rejected": -4.537248134613037,
499
- "logps/chosen": -296.06756591796875,
500
- "logps/rejected": -239.85092163085938,
501
- "loss": 0.3336,
502
  "rewards/accuracies": 0.875,
503
- "rewards/chosen": 3.3819689750671387,
504
- "rewards/margins": 2.6246695518493652,
505
- "rewards/rejected": 0.7572996020317078,
506
  "step": 300
507
  },
508
  {
509
- "epoch": 0.8571428571428571,
510
- "eval_logits/chosen": -3.2277979850769043,
511
- "eval_logits/rejected": -3.2277979850769043,
512
- "eval_logps/chosen": -158.84542846679688,
513
- "eval_logps/rejected": -158.84542846679688,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
- "eval_rewards/chosen": -1.6342105865478516,
517
  "eval_rewards/margins": 0.0,
518
- "eval_rewards/rejected": -1.6342105865478516,
519
- "eval_runtime": 1.5181,
520
- "eval_samples_per_second": 0.659,
521
- "eval_steps_per_second": 0.659,
522
  "step": 300
523
  },
524
  {
525
- "epoch": 0.8857142857142857,
526
- "grad_norm": 1352.5130710151727,
527
- "learning_rate": 1.5941282340065698e-09,
528
- "logits/chosen": -4.436648368835449,
529
- "logits/rejected": -4.576234817504883,
530
- "logps/chosen": -260.4552001953125,
531
- "logps/rejected": -226.2211456298828,
532
- "loss": 0.2894,
533
  "rewards/accuracies": 0.856249988079071,
534
- "rewards/chosen": 2.8304812908172607,
535
- "rewards/margins": 2.2945332527160645,
536
- "rewards/rejected": 0.535947859287262,
537
  "step": 310
538
  },
539
  {
540
- "epoch": 0.9142857142857143,
541
- "grad_norm": 759.0330474241811,
542
- "learning_rate": 9.009284826036689e-10,
543
- "logits/chosen": -4.273808002471924,
544
- "logits/rejected": -4.527020454406738,
545
- "logps/chosen": -290.4542541503906,
546
- "logps/rejected": -243.5283966064453,
547
- "loss": 0.314,
548
- "rewards/accuracies": 0.8687499761581421,
549
- "rewards/chosen": 3.2147815227508545,
550
- "rewards/margins": 2.507025718688965,
551
- "rewards/rejected": 0.7077558040618896,
552
  "step": 320
553
  },
554
  {
555
- "epoch": 0.9428571428571428,
556
- "grad_norm": 1012.631208038228,
557
- "learning_rate": 4.017602850342583e-10,
558
- "logits/chosen": -4.329623222351074,
559
- "logits/rejected": -4.535677433013916,
560
- "logps/chosen": -303.55926513671875,
561
- "logps/rejected": -251.89437866210938,
562
- "loss": 0.3017,
563
- "rewards/accuracies": 0.875,
564
- "rewards/chosen": 3.013214588165283,
565
- "rewards/margins": 2.4605515003204346,
566
- "rewards/rejected": 0.5526631474494934,
567
  "step": 330
568
  },
569
  {
570
- "epoch": 0.9714285714285714,
571
- "grad_norm": 1047.7085022489064,
572
- "learning_rate": 1.0064265011902328e-10,
573
- "logits/chosen": -4.3172688484191895,
574
- "logits/rejected": -4.609848976135254,
575
- "logps/chosen": -283.23883056640625,
576
- "logps/rejected": -227.2130126953125,
577
- "loss": 0.3029,
578
- "rewards/accuracies": 0.856249988079071,
579
- "rewards/chosen": 2.853224277496338,
580
- "rewards/margins": 2.2073826789855957,
581
- "rewards/rejected": 0.6458419561386108,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
- "grad_norm": 570.6961447414922,
587
  "learning_rate": 0.0,
588
- "logits/chosen": -4.2885661125183105,
589
- "logits/rejected": -4.4927144050598145,
590
- "logps/chosen": -289.79022216796875,
591
- "logps/rejected": -244.1138916015625,
592
- "loss": 0.2904,
593
- "rewards/accuracies": 0.84375,
594
- "rewards/chosen": 3.115464448928833,
595
- "rewards/margins": 2.5564632415771484,
596
- "rewards/rejected": 0.5590011477470398,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
- "train_loss": 0.34604193687438967,
604
- "train_runtime": 5793.0735,
605
- "train_samples_per_second": 7.726,
606
- "train_steps_per_second": 0.06
607
  }
608
  ],
609
  "logging_steps": 10,
@@ -611,18 +611,6 @@
611
  "num_input_tokens_seen": 0,
612
  "num_train_epochs": 1,
613
  "save_steps": 100,
614
- "stateful_callbacks": {
615
- "TrainerControl": {
616
- "args": {
617
- "should_epoch_stop": false,
618
- "should_evaluate": false,
619
- "should_log": false,
620
- "should_save": true,
621
- "should_training_stop": false
622
- },
623
- "attributes": {}
624
- }
625
- },
626
  "total_flos": 0.0,
627
  "train_batch_size": 8,
628
  "trial_name": null,
 
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0,
13
+ "grad_norm": 3039.0846042522635,
14
+ "learning_rate": 1.4285714285714284e-08,
15
  "logits/chosen": -4.185730934143066,
16
  "logits/rejected": -4.509836196899414,
17
  "logps/chosen": -274.000732421875,
18
  "logps/rejected": -205.8054962158203,
19
+ "loss": 0.6932,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
22
  "rewards/margins": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.03,
28
+ "grad_norm": 3122.252138846549,
29
+ "learning_rate": 1.4285714285714285e-07,
30
+ "logits/chosen": -4.2117600440979,
31
+ "logits/rejected": -4.4855546951293945,
32
+ "logps/chosen": -318.3944396972656,
33
+ "logps/rejected": -257.1120910644531,
34
+ "loss": 0.7578,
35
+ "rewards/accuracies": 0.4375,
36
+ "rewards/chosen": 0.11668112874031067,
37
+ "rewards/margins": -0.05277401953935623,
38
+ "rewards/rejected": 0.1694551408290863,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.06,
43
+ "grad_norm": 2683.55929188042,
44
+ "learning_rate": 2.857142857142857e-07,
45
+ "logits/chosen": -4.264363765716553,
46
+ "logits/rejected": -4.5196099281311035,
47
+ "logps/chosen": -303.1786193847656,
48
+ "logps/rejected": -243.7255096435547,
49
+ "loss": 0.5287,
50
+ "rewards/accuracies": 0.7749999761581421,
51
+ "rewards/chosen": 2.058025598526001,
52
+ "rewards/margins": 1.5574162006378174,
53
+ "rewards/rejected": 0.5006095767021179,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.09,
58
+ "grad_norm": 1727.068886617789,
59
+ "learning_rate": 4.285714285714285e-07,
60
+ "logits/chosen": -4.307942867279053,
61
+ "logits/rejected": -4.567526340484619,
62
+ "logps/chosen": -299.24615478515625,
63
+ "logps/rejected": -256.9350280761719,
64
+ "loss": 0.4422,
65
+ "rewards/accuracies": 0.831250011920929,
66
+ "rewards/chosen": 7.268563747406006,
67
+ "rewards/margins": 4.868700981140137,
68
+ "rewards/rejected": 2.3998632431030273,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.11,
73
+ "grad_norm": 2206.909408534419,
74
+ "learning_rate": 4.996892303047305e-07,
75
+ "logits/chosen": -4.312300682067871,
76
+ "logits/rejected": -4.578764915466309,
77
+ "logps/chosen": -288.2650451660156,
78
+ "logps/rejected": -235.3504638671875,
79
+ "loss": 0.5344,
80
+ "rewards/accuracies": 0.8062499761581421,
81
+ "rewards/chosen": 7.26935338973999,
82
+ "rewards/margins": 7.414456367492676,
83
+ "rewards/rejected": -0.145103320479393,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.14,
88
+ "grad_norm": 1574.7884185445766,
89
+ "learning_rate": 4.972077065562821e-07,
90
+ "logits/chosen": -4.287051200866699,
91
+ "logits/rejected": -4.532064914703369,
92
+ "logps/chosen": -295.8678894042969,
93
+ "logps/rejected": -256.9671325683594,
94
+ "loss": 0.514,
95
+ "rewards/accuracies": 0.8687499761581421,
96
+ "rewards/chosen": 6.857720851898193,
97
+ "rewards/margins": 10.064432144165039,
98
+ "rewards/rejected": -3.206712007522583,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.17,
103
+ "grad_norm": 2086.3490202582466,
104
+ "learning_rate": 4.922693215572695e-07,
105
+ "logits/chosen": -4.274147033691406,
106
+ "logits/rejected": -4.500526428222656,
107
+ "logps/chosen": -304.9868469238281,
108
+ "logps/rejected": -266.1625061035156,
109
+ "loss": 0.5936,
110
+ "rewards/accuracies": 0.84375,
111
+ "rewards/chosen": 6.364766597747803,
112
+ "rewards/margins": 9.915193557739258,
113
+ "rewards/rejected": -3.550427198410034,
114
  "step": 60
115
  },
116
  {
117
  "epoch": 0.2,
118
+ "grad_norm": 2489.785834029294,
119
+ "learning_rate": 4.849231551964771e-07,
120
+ "logits/chosen": -4.388774871826172,
121
+ "logits/rejected": -4.523660659790039,
122
+ "logps/chosen": -281.50799560546875,
123
+ "logps/rejected": -240.20126342773438,
124
+ "loss": 0.578,
125
+ "rewards/accuracies": 0.84375,
126
+ "rewards/chosen": 7.396153926849365,
127
+ "rewards/margins": 10.37517261505127,
128
+ "rewards/rejected": -2.9790191650390625,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.23,
133
+ "grad_norm": 1795.0951767218703,
134
+ "learning_rate": 4.752422169756047e-07,
135
+ "logits/chosen": -4.22324275970459,
136
+ "logits/rejected": -4.482357025146484,
137
+ "logps/chosen": -289.7461853027344,
138
+ "logps/rejected": -244.2007293701172,
139
+ "loss": 0.6974,
140
+ "rewards/accuracies": 0.875,
141
+ "rewards/chosen": 5.9415082931518555,
142
+ "rewards/margins": 9.690756797790527,
143
+ "rewards/rejected": -3.749248504638672,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.26,
148
+ "grad_norm": 1863.4883740900614,
149
+ "learning_rate": 4.6332272040803887e-07,
150
+ "logits/chosen": -4.147845268249512,
151
+ "logits/rejected": -4.379548072814941,
152
+ "logps/chosen": -301.5574645996094,
153
+ "logps/rejected": -264.04815673828125,
154
+ "loss": 0.6584,
155
+ "rewards/accuracies": 0.8125,
156
+ "rewards/chosen": 6.005797386169434,
157
+ "rewards/margins": 10.730647087097168,
158
+ "rewards/rejected": -4.724849700927734,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.29,
163
+ "grad_norm": 2150.0529520665987,
164
+ "learning_rate": 4.492831268057306e-07,
165
+ "logits/chosen": -4.204574108123779,
166
+ "logits/rejected": -4.426244735717773,
167
+ "logps/chosen": -287.6278076171875,
168
+ "logps/rejected": -244.3843231201172,
169
+ "loss": 0.7723,
170
+ "rewards/accuracies": 0.824999988079071,
171
+ "rewards/chosen": 8.200125694274902,
172
+ "rewards/margins": 10.383246421813965,
173
+ "rewards/rejected": -2.1831212043762207,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.29,
178
+ "eval_logits/chosen": -3.147157669067383,
179
+ "eval_logits/rejected": -3.147157669067383,
180
+ "eval_logps/chosen": -157.6016845703125,
181
+ "eval_logps/rejected": -157.6016845703125,
182
  "eval_loss": 0.6931471824645996,
183
  "eval_rewards/accuracies": 0.0,
184
+ "eval_rewards/chosen": -2.0246658325195312,
185
  "eval_rewards/margins": 0.0,
186
+ "eval_rewards/rejected": -2.0246658325195312,
187
+ "eval_runtime": 1.5111,
188
+ "eval_samples_per_second": 0.662,
189
+ "eval_steps_per_second": 0.662,
190
  "step": 100
191
  },
192
  {
193
+ "epoch": 0.31,
194
+ "grad_norm": 2069.7674123460392,
195
+ "learning_rate": 4.332629679574565e-07,
196
+ "logits/chosen": -4.301981449127197,
197
+ "logits/rejected": -4.562737464904785,
198
+ "logps/chosen": -298.0445861816406,
199
+ "logps/rejected": -258.07379150390625,
200
+ "loss": 0.9782,
201
+ "rewards/accuracies": 0.84375,
202
+ "rewards/chosen": 5.386460304260254,
203
+ "rewards/margins": 12.155603408813477,
204
+ "rewards/rejected": -6.769143581390381,
205
  "step": 110
206
  },
207
  {
208
+ "epoch": 0.34,
209
+ "grad_norm": 2493.1141420840436,
210
+ "learning_rate": 4.154214593992149e-07,
211
+ "logits/chosen": -4.2862958908081055,
212
+ "logits/rejected": -4.542973518371582,
213
+ "logps/chosen": -285.873046875,
214
+ "logps/rejected": -243.8842010498047,
215
+ "loss": 0.9519,
216
+ "rewards/accuracies": 0.8374999761581421,
217
+ "rewards/chosen": 7.061153411865234,
218
+ "rewards/margins": 11.827821731567383,
219
+ "rewards/rejected": -4.766669273376465,
220
  "step": 120
221
  },
222
  {
223
+ "epoch": 0.37,
224
+ "grad_norm": 1751.0600688695415,
225
+ "learning_rate": 3.959359180586975e-07,
226
+ "logits/chosen": -4.280123710632324,
227
+ "logits/rejected": -4.522739887237549,
228
+ "logps/chosen": -277.5672302246094,
229
+ "logps/rejected": -226.9970703125,
230
+ "loss": 0.7208,
231
+ "rewards/accuracies": 0.875,
232
+ "rewards/chosen": 5.132817268371582,
233
+ "rewards/margins": 12.189440727233887,
234
+ "rewards/rejected": -7.056623935699463,
235
  "step": 130
236
  },
237
  {
238
  "epoch": 0.4,
239
+ "grad_norm": 1830.8828386105183,
240
+ "learning_rate": 3.75e-07,
241
+ "logits/chosen": -4.162067413330078,
242
+ "logits/rejected": -4.473877429962158,
243
+ "logps/chosen": -294.2828369140625,
244
+ "logps/rejected": -248.1217041015625,
245
+ "loss": 0.8272,
246
+ "rewards/accuracies": 0.887499988079071,
247
+ "rewards/chosen": 5.600853443145752,
248
+ "rewards/margins": 12.919031143188477,
249
+ "rewards/rejected": -7.31817626953125,
250
  "step": 140
251
  },
252
  {
253
+ "epoch": 0.43,
254
+ "grad_norm": 1975.8832437780866,
255
+ "learning_rate": 3.528217757826529e-07,
256
+ "logits/chosen": -4.179337978363037,
257
+ "logits/rejected": -4.442940711975098,
258
+ "logps/chosen": -295.30914306640625,
259
+ "logps/rejected": -259.9874267578125,
260
+ "loss": 0.8642,
261
+ "rewards/accuracies": 0.7749999761581421,
262
+ "rewards/chosen": 4.101029872894287,
263
+ "rewards/margins": 12.387203216552734,
264
+ "rewards/rejected": -8.286172866821289,
265
  "step": 150
266
  },
267
  {
268
+ "epoch": 0.46,
269
+ "grad_norm": 1974.3387943820865,
270
+ "learning_rate": 3.296216625629211e-07,
271
+ "logits/chosen": -4.1478681564331055,
272
+ "logits/rejected": -4.431545257568359,
273
+ "logps/chosen": -294.5439758300781,
274
+ "logps/rejected": -244.33657836914062,
275
+ "loss": 0.9334,
276
+ "rewards/accuracies": 0.8812500238418579,
277
+ "rewards/chosen": 6.383957386016846,
278
+ "rewards/margins": 13.940587997436523,
279
+ "rewards/rejected": -7.556630611419678,
280
  "step": 160
281
  },
282
  {
283
+ "epoch": 0.49,
284
+ "grad_norm": 1732.7845040237094,
285
+ "learning_rate": 3.056302334890786e-07,
286
+ "logits/chosen": -4.18727970123291,
287
+ "logits/rejected": -4.425799369812012,
288
+ "logps/chosen": -294.9515075683594,
289
+ "logps/rejected": -250.8203125,
290
+ "loss": 0.8554,
291
+ "rewards/accuracies": 0.84375,
292
+ "rewards/chosen": 5.709182262420654,
293
+ "rewards/margins": 14.079116821289062,
294
+ "rewards/rejected": -8.369935989379883,
295
  "step": 170
296
  },
297
  {
298
+ "epoch": 0.51,
299
+ "grad_norm": 2121.8411757635613,
300
+ "learning_rate": 2.810859261618713e-07,
301
+ "logits/chosen": -4.313704490661621,
302
+ "logits/rejected": -4.544769287109375,
303
+ "logps/chosen": -282.9936828613281,
304
+ "logps/rejected": -250.189453125,
305
+ "loss": 0.7998,
306
+ "rewards/accuracies": 0.824999988079071,
307
+ "rewards/chosen": 6.883467674255371,
308
+ "rewards/margins": 14.591836929321289,
309
+ "rewards/rejected": -7.708369255065918,
310
  "step": 180
311
  },
312
  {
313
+ "epoch": 0.54,
314
+ "grad_norm": 1756.6999736537189,
315
+ "learning_rate": 2.5623267293451823e-07,
316
+ "logits/chosen": -4.283580303192139,
317
+ "logits/rejected": -4.457066535949707,
318
+ "logps/chosen": -305.109375,
319
+ "logps/rejected": -265.3091125488281,
320
+ "loss": 0.6833,
321
+ "rewards/accuracies": 0.887499988079071,
322
+ "rewards/chosen": 7.185091495513916,
323
+ "rewards/margins": 13.808235168457031,
324
+ "rewards/rejected": -6.623143196105957,
325
  "step": 190
326
  },
327
  {
328
+ "epoch": 0.57,
329
+ "grad_norm": 1748.1359792805438,
330
+ "learning_rate": 2.3131747660339394e-07,
331
+ "logits/chosen": -4.295716285705566,
332
+ "logits/rejected": -4.545838832855225,
333
+ "logps/chosen": -289.03521728515625,
334
+ "logps/rejected": -257.3003234863281,
335
+ "loss": 0.7717,
336
  "rewards/accuracies": 0.856249988079071,
337
+ "rewards/chosen": 8.096379280090332,
338
+ "rewards/margins": 14.581645011901855,
339
+ "rewards/rejected": -6.485265254974365,
340
  "step": 200
341
  },
342
  {
343
+ "epoch": 0.57,
344
+ "eval_logits/chosen": -3.163522243499756,
345
+ "eval_logits/rejected": -3.163522243499756,
346
+ "eval_logps/chosen": -167.66006469726562,
347
+ "eval_logps/rejected": -167.66006469726562,
348
  "eval_loss": 0.6931471824645996,
349
  "eval_rewards/accuracies": 0.0,
350
+ "eval_rewards/chosen": -12.083049774169922,
351
  "eval_rewards/margins": 0.0,
352
+ "eval_rewards/rejected": -12.083049774169922,
353
+ "eval_runtime": 1.4711,
354
+ "eval_samples_per_second": 0.68,
355
+ "eval_steps_per_second": 0.68,
356
  "step": 200
357
  },
358
  {
359
  "epoch": 0.6,
360
+ "grad_norm": 1829.602720191424,
361
+ "learning_rate": 2.065879555832674e-07,
362
+ "logits/chosen": -4.250877857208252,
363
+ "logits/rejected": -4.542287826538086,
364
+ "logps/chosen": -303.2696838378906,
365
+ "logps/rejected": -262.7691650390625,
366
+ "loss": 0.8229,
367
  "rewards/accuracies": 0.8687499761581421,
368
+ "rewards/chosen": 7.95089864730835,
369
+ "rewards/margins": 15.162821769714355,
370
+ "rewards/rejected": -7.211922645568848,
371
  "step": 210
372
  },
373
  {
374
+ "epoch": 0.63,
375
+ "grad_norm": 1788.0044780541311,
376
+ "learning_rate": 1.8228988296424875e-07,
377
+ "logits/chosen": -4.257784366607666,
378
+ "logits/rejected": -4.608870983123779,
379
+ "logps/chosen": -293.54071044921875,
380
+ "logps/rejected": -233.70166015625,
381
+ "loss": 0.7993,
382
+ "rewards/accuracies": 0.8812500238418579,
383
+ "rewards/chosen": 7.973156929016113,
384
+ "rewards/margins": 16.081945419311523,
385
+ "rewards/rejected": -8.108789443969727,
386
  "step": 220
387
  },
388
  {
389
+ "epoch": 0.66,
390
+ "grad_norm": 1900.3878505004946,
391
+ "learning_rate": 1.5866474390840124e-07,
392
+ "logits/chosen": -4.334306716918945,
393
+ "logits/rejected": -4.525221824645996,
394
+ "logps/chosen": -278.41827392578125,
395
+ "logps/rejected": -233.2617645263672,
396
+ "loss": 0.6293,
397
  "rewards/accuracies": 0.862500011920929,
398
+ "rewards/chosen": 6.539956569671631,
399
+ "rewards/margins": 12.842289924621582,
400
+ "rewards/rejected": -6.30233097076416,
401
  "step": 230
402
  },
403
  {
404
+ "epoch": 0.69,
405
+ "grad_norm": 1759.0935765619065,
406
+ "learning_rate": 1.3594733566170925e-07,
407
+ "logits/chosen": -4.360232353210449,
408
+ "logits/rejected": -4.688153266906738,
409
+ "logps/chosen": -255.55807495117188,
410
+ "logps/rejected": -222.76504516601562,
411
+ "loss": 1.0017,
412
+ "rewards/accuracies": 0.831250011920929,
413
+ "rewards/chosen": 6.956341743469238,
414
+ "rewards/margins": 13.633699417114258,
415
+ "rewards/rejected": -6.677358150482178,
416
  "step": 240
417
  },
418
  {
419
+ "epoch": 0.71,
420
+ "grad_norm": 1730.0292892661773,
421
+ "learning_rate": 1.1436343403356016e-07,
422
+ "logits/chosen": -4.352273941040039,
423
+ "logits/rejected": -4.6365180015563965,
424
+ "logps/chosen": -276.07659912109375,
425
+ "logps/rejected": -241.01425170898438,
426
+ "loss": 0.8087,
427
+ "rewards/accuracies": 0.8687499761581421,
428
+ "rewards/chosen": 6.498913764953613,
429
+ "rewards/margins": 14.106257438659668,
430
+ "rewards/rejected": -7.6073455810546875,
431
  "step": 250
432
  },
433
  {
434
+ "epoch": 0.74,
435
+ "grad_norm": 1731.9100006560693,
436
+ "learning_rate": 9.412754953531663e-08,
437
+ "logits/chosen": -4.388053894042969,
438
+ "logits/rejected": -4.64432954788208,
439
+ "logps/chosen": -270.12225341796875,
440
+ "logps/rejected": -249.11416625976562,
441
+ "loss": 0.6771,
442
  "rewards/accuracies": 0.8687499761581421,
443
+ "rewards/chosen": 5.336110591888428,
444
+ "rewards/margins": 13.311798095703125,
445
+ "rewards/rejected": -7.975686073303223,
446
  "step": 260
447
  },
448
  {
449
+ "epoch": 0.77,
450
+ "grad_norm": 1559.894108247427,
451
+ "learning_rate": 7.544079547848181e-08,
452
+ "logits/chosen": -4.511970520019531,
453
+ "logits/rejected": -4.677350044250488,
454
+ "logps/chosen": -272.5389709472656,
455
+ "logps/rejected": -237.0705108642578,
456
+ "loss": 0.816,
457
+ "rewards/accuracies": 0.831250011920929,
458
+ "rewards/chosen": 4.5388288497924805,
459
+ "rewards/margins": 12.382614135742188,
460
+ "rewards/rejected": -7.843785762786865,
461
  "step": 270
462
  },
463
  {
464
  "epoch": 0.8,
465
+ "grad_norm": 2131.975353118901,
466
+ "learning_rate": 5.848888922025552e-08,
467
+ "logits/chosen": -4.293630123138428,
468
+ "logits/rejected": -4.587409496307373,
469
+ "logps/chosen": -272.18914794921875,
470
+ "logps/rejected": -237.04507446289062,
471
+ "loss": 0.7076,
472
+ "rewards/accuracies": 0.8500000238418579,
473
+ "rewards/chosen": 6.89593505859375,
474
+ "rewards/margins": 13.17906379699707,
475
+ "rewards/rejected": -6.283128261566162,
476
  "step": 280
477
  },
478
  {
479
+ "epoch": 0.83,
480
+ "grad_norm": 2073.5868352302195,
481
+ "learning_rate": 4.3440306421001324e-08,
482
+ "logits/chosen": -4.306157112121582,
483
+ "logits/rejected": -4.501837253570557,
484
+ "logps/chosen": -279.41571044921875,
485
+ "logps/rejected": -245.3014678955078,
486
+ "loss": 0.7257,
487
+ "rewards/accuracies": 0.918749988079071,
488
+ "rewards/chosen": 7.150078773498535,
489
+ "rewards/margins": 15.104804992675781,
490
+ "rewards/rejected": -7.954724311828613,
491
  "step": 290
492
  },
493
  {
494
+ "epoch": 0.86,
495
+ "grad_norm": 1664.7949029760775,
496
+ "learning_rate": 3.044460665744283e-08,
497
+ "logits/chosen": -4.291565418243408,
498
+ "logits/rejected": -4.546942234039307,
499
+ "logps/chosen": -294.171142578125,
500
+ "logps/rejected": -248.09219360351562,
501
+ "loss": 0.782,
502
  "rewards/accuracies": 0.875,
503
+ "rewards/chosen": 8.660406112670898,
504
+ "rewards/margins": 15.387075424194336,
505
+ "rewards/rejected": -6.726672172546387,
506
  "step": 300
507
  },
508
  {
509
+ "epoch": 0.86,
510
+ "eval_logits/chosen": -3.222372531890869,
511
+ "eval_logits/rejected": -3.222372531890869,
512
+ "eval_logps/chosen": -164.06509399414062,
513
+ "eval_logps/rejected": -164.06509399414062,
514
  "eval_loss": 0.6931471824645996,
515
  "eval_rewards/accuracies": 0.0,
516
+ "eval_rewards/chosen": -8.488082885742188,
517
  "eval_rewards/margins": 0.0,
518
+ "eval_rewards/rejected": -8.488082885742188,
519
+ "eval_runtime": 1.4741,
520
+ "eval_samples_per_second": 0.678,
521
+ "eval_steps_per_second": 0.678,
522
  "step": 300
523
  },
524
  {
525
+ "epoch": 0.89,
526
+ "grad_norm": 2673.859330983399,
527
+ "learning_rate": 1.9630947032398066e-08,
528
+ "logits/chosen": -4.44521427154541,
529
+ "logits/rejected": -4.5893964767456055,
530
+ "logps/chosen": -260.0516662597656,
531
+ "logps/rejected": -234.23690795898438,
532
+ "loss": 0.7257,
533
  "rewards/accuracies": 0.856249988079071,
534
+ "rewards/chosen": 6.064515113830566,
535
+ "rewards/margins": 13.008413314819336,
536
+ "rewards/rejected": -6.943899631500244,
537
  "step": 310
538
  },
539
  {
540
+ "epoch": 0.91,
541
+ "grad_norm": 1691.3211570500287,
542
+ "learning_rate": 1.1106798553464802e-08,
543
+ "logits/chosen": -4.273613929748535,
544
+ "logits/rejected": -4.540968894958496,
545
+ "logps/chosen": -289.5185546875,
546
+ "logps/rejected": -251.7852325439453,
547
+ "loss": 0.6936,
548
+ "rewards/accuracies": 0.887499988079071,
549
+ "rewards/chosen": 7.365248203277588,
550
+ "rewards/margins": 14.206552505493164,
551
+ "rewards/rejected": -6.84130334854126,
552
  "step": 320
553
  },
554
  {
555
+ "epoch": 0.94,
556
+ "grad_norm": 2384.133770310185,
557
+ "learning_rate": 4.956878037864043e-09,
558
+ "logits/chosen": -4.335446834564209,
559
+ "logits/rejected": -4.543330669403076,
560
+ "logps/chosen": -303.6029968261719,
561
+ "logps/rejected": -260.98760986328125,
562
+ "loss": 0.7573,
563
+ "rewards/accuracies": 0.856249988079071,
564
+ "rewards/chosen": 5.982678413391113,
565
+ "rewards/margins": 13.970565795898438,
566
+ "rewards/rejected": -7.987887382507324,
567
  "step": 330
568
  },
569
  {
570
+ "epoch": 0.97,
571
+ "grad_norm": 2211.192973189294,
572
+ "learning_rate": 1.2423061586496476e-09,
573
+ "logits/chosen": -4.319240093231201,
574
+ "logits/rejected": -4.623973846435547,
575
+ "logps/chosen": -283.38006591796875,
576
+ "logps/rejected": -234.4169921875,
577
+ "loss": 0.8286,
578
+ "rewards/accuracies": 0.8687499761581421,
579
+ "rewards/chosen": 5.565188407897949,
580
+ "rewards/margins": 11.47750186920166,
581
+ "rewards/rejected": -5.912313938140869,
582
  "step": 340
583
  },
584
  {
585
  "epoch": 1.0,
586
+ "grad_norm": 1586.2152634156816,
587
  "learning_rate": 0.0,
588
+ "logits/chosen": -4.285008430480957,
589
+ "logits/rejected": -4.503040790557861,
590
+ "logps/chosen": -289.0204162597656,
591
+ "logps/rejected": -251.9452362060547,
592
+ "loss": 0.7666,
593
+ "rewards/accuracies": 0.875,
594
+ "rewards/chosen": 7.000736236572266,
595
+ "rewards/margins": 13.714078903198242,
596
+ "rewards/rejected": -6.713343143463135,
597
  "step": 350
598
  },
599
  {
600
  "epoch": 1.0,
601
  "step": 350,
602
  "total_flos": 0.0,
603
+ "train_loss": 0.7428068714482444,
604
+ "train_runtime": 5299.044,
605
+ "train_samples_per_second": 8.446,
606
+ "train_steps_per_second": 0.066
607
  }
608
  ],
609
  "logging_steps": 10,
 
611
  "num_input_tokens_seen": 0,
612
  "num_train_epochs": 1,
613
  "save_steps": 100,
 
614
  "total_flos": 0.0,
615
  "train_batch_size": 8,
616
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4cfcdc187e96bc5f72f21240b097691d6a8cdf8d0fcc5ad2fe20f43a43287471
- size 6520
+ oid sha256:25686a2a94e778bd06e64992e121362fb8e396ce160a2ce4cb0004cc6efb88ff
+ size 6264
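
The bulk of this commit is the rewritten `log_history` in `trainer_state.json` diffed above: one JSON object per training log (every 10 steps, with loss, learning rate, grad norm, and reward/logp statistics) plus an evaluation entry every 100 steps. A minimal sketch, assuming a local copy of the new `trainer_state.json`, that separates the two kinds of entries for quick inspection:

```python
# Read the committed trainer_state.json and print train vs. eval log entries.
import json

with open("trainer_state.json") as f:  # local path is an assumption
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # training log (every logging_steps=10 steps)
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
              f"rewards/margins {entry.get('rewards/margins', float('nan')):.3f}")
    elif "eval_loss" in entry:  # evaluation log (every 100 steps)
        print(f"step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}  "
              f"eval_rewards/chosen {entry['eval_rewards/chosen']:.3f}")
```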