RikkiXu committed on
Commit
7dd2eeb
1 Parent(s): 9e04fe2

Model save

README.md CHANGED
@@ -15,15 +15,15 @@ should probably proofread and complete it, then remove this comment. -->
15
 
16
  This model was trained from scratch on the None dataset.
17
  It achieves the following results on the evaluation set:
18
- - Loss: 6.9230
19
- - Rewards/chosen: -4.5175
20
- - Rewards/rejected: 0.4288
21
- - Rewards/accuracies: 0.3164
22
- - Rewards/margins: -4.9464
23
- - Logps/rejected: -517.5300
24
- - Logps/chosen: -399.5095
25
- - Logits/rejected: -4.8908
26
- - Logits/chosen: -4.6604
27
 
28
  ## Model description
29
 
@@ -42,7 +42,7 @@ More information needed
42
  ### Training hyperparameters
43
 
44
  The following hyperparameters were used during training:
45
- - learning_rate: 5e-07
46
  - train_batch_size: 8
47
  - eval_batch_size: 8
48
  - seed: 42
@@ -58,16 +58,16 @@ The following hyperparameters were used during training:
58
 
59
  ### Training results
60
 
61
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
- |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
- | 0.5909 | 0.2635 | 100 | 6.6534 | -3.0090 | 2.8904 | 0.2773 | -5.8994 | -512.6068 | -396.4924 | -4.8508 | -4.6121 |
64
- | 0.7239 | 0.5270 | 200 | 8.0720 | -2.8191 | 3.5065 | 0.2734 | -6.3256 | -511.3747 | -396.1127 | -4.9896 | -4.7715 |
65
- | 0.5556 | 0.7905 | 300 | 6.9230 | -4.5175 | 0.4288 | 0.3164 | -4.9464 | -517.5300 | -399.5095 | -4.8908 | -4.6604 |
66
 
67
 
68
  ### Framework versions
69
 
70
- - Transformers 4.41.1
71
  - Pytorch 2.1.2+cu118
72
  - Datasets 2.16.1
73
- - Tokenizers 0.19.1
 
15
 
16
  This model was trained from scratch on the None dataset.
17
  It achieves the following results on the evaluation set:
18
+ - Loss: 1.9828
19
+ - Rewards/chosen: -3.4223
20
+ - Rewards/rejected: -2.1126
21
+ - Rewards/accuracies: 0.3555
22
+ - Rewards/margins: -1.3097
23
+ - Logps/rejected: -521.2875
24
+ - Logps/chosen: -405.8879
25
+ - Logits/rejected: -4.9364
26
+ - Logits/chosen: -4.7068
27
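> A quick sanity check, assuming the usual convention that `Rewards/margins` is simply `Rewards/chosen` minus `Rewards/rejected`, shows the reported evaluation numbers are self-consistent:

```python
# Hypothetical check (not part of the original card): margin = chosen - rejected.
rewards_chosen, rewards_rejected = -3.4223, -2.1126
print(round(rewards_chosen - rewards_rejected, 4))  # -1.3097, matches Rewards/margins
```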
 
28
  ## Model description
29
 
 
42
  ### Training hyperparameters
43
 
44
  The following hyperparameters were used during training:
45
+ - learning_rate: 5e-08
46
  - train_batch_size: 8
47
  - eval_batch_size: 8
48
  - seed: 42
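> A minimal sketch (not the original training script) of how the hyperparameters visible in this hunk would map onto Hugging Face `TrainingArguments`; settings hidden by the diff (optimizer, scheduler, epochs, ...) are omitted, and `output_dir` is a placeholder:

```python
# Sketch only: maps the visible hyperparameters onto TrainingArguments.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",               # placeholder, not taken from the card
    learning_rate=5e-08,
    per_device_train_batch_size=8,  # train_batch_size: 8
    per_device_eval_batch_size=8,   # eval_batch_size: 8
    seed=42,
)
```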
 
58
 
59
  ### Training results
60
 
61
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.3652 | 0.29 | 100 | 1.7488 | -2.2159 | -1.0097 | 0.3516 | -1.2062 | -519.0817 | -403.4752 | -4.9249 | -4.6962 |
64
+ | 0.312 | 0.57 | 200 | 1.9596 | -3.1949 | -1.8164 | 0.3398 | -1.3786 | -520.6950 | -405.4332 | -4.9391 | -4.7096 |
65
+ | 0.2993 | 0.86 | 300 | 1.9828 | -3.4223 | -2.1126 | 0.3555 | -1.3097 | -521.2875 | -405.8879 | -4.9364 | -4.7068 |
66
 
67
 
68
  ### Framework versions
69
 
70
+ - Transformers 4.38.2
71
  - Pytorch 2.1.2+cu118
72
  - Datasets 2.16.1
73
+ - Tokenizers 0.15.2
all_results.json CHANGED
@@ -1,9 +1,8 @@
1
  {
2
- "epoch": 0.9986824769433466,
3
- "total_flos": 0.0,
4
- "train_loss": 0.5517442987587962,
5
- "train_runtime": 6181.8185,
6
- "train_samples": 48530,
7
- "train_samples_per_second": 7.85,
8
- "train_steps_per_second": 0.061
9
  }
 
1
  {
2
+ "epoch": 1.0,
3
+ "train_loss": 0.3696309270011661,
4
+ "train_runtime": 5591.2149,
5
+ "train_samples": 44682,
6
+ "train_samples_per_second": 7.991,
7
+ "train_steps_per_second": 0.062
 
8
  }
config.json CHANGED
@@ -20,7 +20,7 @@
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
- "transformers_version": "4.41.1",
24
  "use_cache": false,
25
  "vocab_size": 32002
26
  }
 
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
+ "transformers_version": "4.38.2",
24
  "use_cache": false,
25
  "vocab_size": 32002
26
  }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 32000,
5
- "transformers_version": "4.41.1"
6
  }
 
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 32000,
5
+ "transformers_version": "4.38.2"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fad6798eb6bb6153f6b5aff19d3bc5cffa1aeb40bf26f57d5a587788a415d08c
3
  size 4943178720
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b289b436dc9787cb73fff5a2a0c5fb69ad78dc06d960a4946cb9d6b35bf920b3
3
  size 4943178720
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:20ca669573e568702fa6d40967a3d29846dc5ecf876326abd24dbc1904e69161
3
  size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:556d3f80045da81e8b580005d0676f922550a32c41b802ac16391cdb8a56eac7
3
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ddf6a93711a98049fe876e8b35960fb84be7446726bf529c7236bf8c1e7f0d9d
3
  size 4540532728
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06047de541b6531a598ee5f5cc63aaa960471d4a4c6da134647cc69aa23f71b0
3
  size 4540532728
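Each shard above is stored as a Git LFS pointer (`version` / `oid sha256:` / `size`). A minimal sketch, assuming the shard has already been downloaded locally, of checking a file against the `oid` recorded in its pointer (digest taken from the new pointer for shard 3 above):

```python
# Sketch: compare a local shard's SHA-256 with the oid from its LFS pointer.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

expected = "06047de541b6531a598ee5f5cc63aaa960471d4a4c6da134647cc69aa23f71b0"
print(sha256_of("model-00003-of-00003.safetensors") == expected)
```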
runs/May29_23-11-40_n136-129-074/events.out.tfevents.1716995522.n136-129-074.2191665.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:306ad423168f6c7a36b9bc75ec55c766a7028a9abbdc8ffebb6e790aa8f8b678
3
- size 28284
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7e85772f2279a93f5a7a61be83422e97cc2aacc7c45c6008ebf29def9ccc889
3
+ size 31390
tokenizer.json CHANGED
@@ -152,7 +152,6 @@
152
  "end_of_word_suffix": null,
153
  "fuse_unk": true,
154
  "byte_fallback": true,
155
- "ignore_merges": false,
156
  "vocab": {
157
  "<unk>": 0,
158
  "<s>": 1,
 
152
  "end_of_word_suffix": null,
153
  "fuse_unk": true,
154
  "byte_fallback": true,
 
155
  "vocab": {
156
  "<unk>": 0,
157
  "<s>": 1,
train_results.json CHANGED
@@ -1,9 +1,8 @@
1
  {
2
- "epoch": 0.9986824769433466,
3
- "total_flos": 0.0,
4
- "train_loss": 0.5517442987587962,
5
- "train_runtime": 6181.8185,
6
- "train_samples": 48530,
7
- "train_samples_per_second": 7.85,
8
- "train_steps_per_second": 0.061
9
  }
 
1
  {
2
+ "epoch": 1.0,
3
+ "train_loss": 0.3696309270011661,
4
+ "train_runtime": 5591.2149,
5
+ "train_samples": 44682,
6
+ "train_samples_per_second": 7.991,
7
+ "train_steps_per_second": 0.062
 
8
  }
trainer_state.json CHANGED
@@ -1,21 +1,21 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.9986824769433466,
5
  "eval_steps": 100,
6
- "global_step": 379,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.002635046113306983,
13
- "grad_norm": 1701.3284378032004,
14
- "learning_rate": 1.3157894736842104e-08,
15
- "logits/chosen": -4.685327529907227,
16
- "logits/rejected": -4.87608528137207,
17
- "logps/chosen": -207.7137451171875,
18
- "logps/rejected": -145.5098114013672,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
@@ -24,635 +24,578 @@
24
  "step": 1
25
  },
26
  {
27
- "epoch": 0.026350461133069828,
28
- "grad_norm": 1584.9120962726856,
29
- "learning_rate": 1.3157894736842104e-07,
30
- "logits/chosen": -4.499300479888916,
31
- "logits/rejected": -4.840802192687988,
32
- "logps/chosen": -223.6631317138672,
33
- "logps/rejected": -160.81097412109375,
34
- "loss": 0.7136,
35
- "rewards/accuracies": 0.4444444477558136,
36
- "rewards/chosen": 0.004151582717895508,
37
- "rewards/margins": 0.002958830911666155,
38
- "rewards/rejected": 0.001192751806229353,
39
  "step": 10
40
  },
41
  {
42
- "epoch": 0.052700922266139656,
43
- "grad_norm": 955.29292445595,
44
- "learning_rate": 2.631578947368421e-07,
45
- "logits/chosen": -4.518028259277344,
46
- "logits/rejected": -4.817793846130371,
47
- "logps/chosen": -220.1512451171875,
48
- "logps/rejected": -172.69322204589844,
49
- "loss": 0.4939,
50
- "rewards/accuracies": 0.7562500238418579,
51
- "rewards/chosen": 0.9042474627494812,
52
- "rewards/margins": 0.9315685033798218,
53
- "rewards/rejected": -0.02732105180621147,
54
  "step": 20
55
  },
56
  {
57
- "epoch": 0.07905138339920949,
58
- "grad_norm": 1078.6380127502835,
59
- "learning_rate": 3.9473684210526315e-07,
60
- "logits/chosen": -4.58280086517334,
61
- "logits/rejected": -4.878857612609863,
62
- "logps/chosen": -212.6325225830078,
63
- "logps/rejected": -183.79238891601562,
64
- "loss": 0.3906,
65
- "rewards/accuracies": 0.8374999761581421,
66
- "rewards/chosen": 3.010448932647705,
67
- "rewards/margins": 2.6640021800994873,
68
- "rewards/rejected": 0.346446692943573,
69
  "step": 30
70
  },
71
  {
72
- "epoch": 0.10540184453227931,
73
- "grad_norm": 1034.6942446334156,
74
- "learning_rate": 4.999575626062319e-07,
75
- "logits/chosen": -4.496463298797607,
76
- "logits/rejected": -4.832797050476074,
77
- "logps/chosen": -225.2804412841797,
78
- "logps/rejected": -177.3509063720703,
79
- "loss": 0.4804,
80
- "rewards/accuracies": 0.862500011920929,
81
- "rewards/chosen": 4.241217136383057,
82
- "rewards/margins": 4.989673614501953,
83
- "rewards/rejected": -0.7484563589096069,
84
  "step": 40
85
  },
86
  {
87
- "epoch": 0.13175230566534915,
88
- "grad_norm": 1292.8962171873402,
89
- "learning_rate": 4.984737660598186e-07,
90
- "logits/chosen": -4.517908573150635,
91
- "logits/rejected": -4.786294937133789,
92
- "logps/chosen": -214.01718139648438,
93
- "logps/rejected": -174.09678649902344,
94
- "loss": 0.5163,
95
- "rewards/accuracies": 0.84375,
96
- "rewards/chosen": 3.7782645225524902,
97
- "rewards/margins": 5.338944435119629,
98
- "rewards/rejected": -1.5606796741485596,
99
  "step": 50
100
  },
101
  {
102
- "epoch": 0.15810276679841898,
103
- "grad_norm": 1081.6576652859876,
104
- "learning_rate": 4.948824853131236e-07,
105
- "logits/chosen": -4.719171047210693,
106
- "logits/rejected": -4.9818878173828125,
107
- "logps/chosen": -215.00869750976562,
108
- "logps/rejected": -180.1785430908203,
109
- "loss": 0.4821,
110
- "rewards/accuracies": 0.862500011920929,
111
- "rewards/chosen": 3.4492969512939453,
112
- "rewards/margins": 6.232473373413086,
113
- "rewards/rejected": -2.7831759452819824,
114
  "step": 60
115
  },
116
  {
117
- "epoch": 0.1844532279314888,
118
- "grad_norm": 780.7579097796144,
119
- "learning_rate": 4.892141805936084e-07,
120
- "logits/chosen": -4.691411018371582,
121
- "logits/rejected": -4.9814910888671875,
122
- "logps/chosen": -222.72189331054688,
123
- "logps/rejected": -186.29788208007812,
124
- "loss": 0.5005,
125
- "rewards/accuracies": 0.8500000238418579,
126
- "rewards/chosen": 3.7675652503967285,
127
- "rewards/margins": 6.836648464202881,
128
- "rewards/rejected": -3.0690836906433105,
129
  "step": 70
130
  },
131
  {
132
- "epoch": 0.21080368906455862,
133
- "grad_norm": 1751.5015463679365,
134
- "learning_rate": 4.81516928858564e-07,
135
- "logits/chosen": -4.691437721252441,
136
- "logits/rejected": -4.943267822265625,
137
- "logps/chosen": -219.7614288330078,
138
- "logps/rejected": -179.60899353027344,
139
- "loss": 0.5615,
140
- "rewards/accuracies": 0.831250011920929,
141
- "rewards/chosen": 3.4195969104766846,
142
- "rewards/margins": 6.783135890960693,
143
- "rewards/rejected": -3.363539457321167,
144
  "step": 80
145
  },
146
  {
147
- "epoch": 0.23715415019762845,
148
- "grad_norm": 1233.3333710128843,
149
- "learning_rate": 4.7185601601995784e-07,
150
- "logits/chosen": -4.569981575012207,
151
- "logits/rejected": -4.954944610595703,
152
- "logps/chosen": -212.4117431640625,
153
- "logps/rejected": -173.86911010742188,
154
- "loss": 0.4989,
155
- "rewards/accuracies": 0.856249988079071,
156
- "rewards/chosen": 3.7934730052948,
157
- "rewards/margins": 7.21230936050415,
158
- "rewards/rejected": -3.4188361167907715,
159
  "step": 90
160
  },
161
  {
162
- "epoch": 0.2635046113306983,
163
- "grad_norm": 1244.999056136522,
164
- "learning_rate": 4.603133832077953e-07,
165
- "logits/chosen": -4.672577381134033,
166
- "logits/rejected": -4.9231157302856445,
167
- "logps/chosen": -206.11221313476562,
168
- "logps/rejected": -176.262451171875,
169
- "loss": 0.5909,
170
  "rewards/accuracies": 0.8374999761581421,
171
- "rewards/chosen": 3.6933536529541016,
172
- "rewards/margins": 6.304307460784912,
173
- "rewards/rejected": -2.6109542846679688,
174
  "step": 100
175
  },
176
  {
177
- "epoch": 0.2635046113306983,
178
- "eval_logits/chosen": -4.612120628356934,
179
- "eval_logits/rejected": -4.85081672668457,
180
- "eval_logps/chosen": -396.4924011230469,
181
- "eval_logps/rejected": -512.6068115234375,
182
- "eval_loss": 6.653407096862793,
183
- "eval_rewards/accuracies": 0.27734375,
184
- "eval_rewards/chosen": -3.0089728832244873,
185
- "eval_rewards/margins": -5.8993988037109375,
186
- "eval_rewards/rejected": 2.890425682067871,
187
- "eval_runtime": 98.318,
188
- "eval_samples_per_second": 20.342,
189
- "eval_steps_per_second": 0.325,
190
  "step": 100
191
  },
192
  {
193
- "epoch": 0.2898550724637681,
194
- "grad_norm": 870.1595065644425,
195
- "learning_rate": 4.4698693176863316e-07,
196
- "logits/chosen": -4.71740198135376,
197
- "logits/rejected": -4.952963829040527,
198
- "logps/chosen": -208.0568084716797,
199
- "logps/rejected": -172.92605590820312,
200
- "loss": 0.4837,
201
- "rewards/accuracies": 0.8374999761581421,
202
- "rewards/chosen": 3.114009380340576,
203
- "rewards/margins": 7.352834224700928,
204
- "rewards/rejected": -4.238823890686035,
205
  "step": 110
206
  },
207
  {
208
- "epoch": 0.31620553359683795,
209
- "grad_norm": 1230.5660125025618,
210
- "learning_rate": 4.319896928940505e-07,
211
- "logits/chosen": -4.8249382972717285,
212
- "logits/rejected": -5.139795780181885,
213
- "logps/chosen": -204.9242401123047,
214
- "logps/rejected": -181.16221618652344,
215
- "loss": 0.6657,
216
- "rewards/accuracies": 0.8125,
217
- "rewards/chosen": 4.0227861404418945,
218
- "rewards/margins": 6.973240852355957,
219
- "rewards/rejected": -2.950455904006958,
220
  "step": 120
221
  },
222
  {
223
- "epoch": 0.3425559947299078,
224
- "grad_norm": 1400.2090737238004,
225
- "learning_rate": 4.1544886892205354e-07,
226
- "logits/chosen": -4.680369853973389,
227
- "logits/rejected": -4.979363441467285,
228
- "logps/chosen": -218.03897094726562,
229
- "logps/rejected": -192.44757080078125,
230
- "loss": 0.4732,
231
- "rewards/accuracies": 0.862500011920929,
232
- "rewards/chosen": 3.1923911571502686,
233
- "rewards/margins": 8.124491691589355,
234
- "rewards/rejected": -4.932101249694824,
235
  "step": 130
236
  },
237
  {
238
- "epoch": 0.3689064558629776,
239
- "grad_norm": 1284.6303869923338,
240
- "learning_rate": 3.975047544428254e-07,
241
- "logits/chosen": -4.738985061645508,
242
- "logits/rejected": -4.995828628540039,
243
- "logps/chosen": -200.0480499267578,
244
- "logps/rejected": -173.74069213867188,
245
- "loss": 0.7249,
246
- "rewards/accuracies": 0.8687499761581421,
247
- "rewards/chosen": 3.118265151977539,
248
- "rewards/margins": 6.879487037658691,
249
- "rewards/rejected": -3.7612221240997314,
250
  "step": 140
251
  },
252
  {
253
- "epoch": 0.3952569169960474,
254
- "grad_norm": 1087.9280552765,
255
- "learning_rate": 3.78309546359696e-07,
256
- "logits/chosen": -4.816591739654541,
257
- "logits/rejected": -5.046236991882324,
258
- "logps/chosen": -197.75784301757812,
259
- "logps/rejected": -187.12631225585938,
260
- "loss": 0.6191,
261
- "rewards/accuracies": 0.918749988079071,
262
- "rewards/chosen": 1.7183030843734741,
263
- "rewards/margins": 8.374044418334961,
264
- "rewards/rejected": -6.655740261077881,
265
  "step": 150
266
  },
267
  {
268
- "epoch": 0.42160737812911725,
269
- "grad_norm": 870.0436658351372,
270
- "learning_rate": 3.580260529980584e-07,
271
- "logits/chosen": -4.622679233551025,
272
- "logits/rejected": -4.90102481842041,
273
- "logps/chosen": -210.0752410888672,
274
- "logps/rejected": -182.36378479003906,
275
- "loss": 0.5743,
276
- "rewards/accuracies": 0.8374999761581421,
277
- "rewards/chosen": 2.5026469230651855,
278
- "rewards/margins": 6.609257698059082,
279
- "rewards/rejected": -4.106611251831055,
280
  "step": 160
281
  },
282
  {
283
- "epoch": 0.4479578392621871,
284
- "grad_norm": 1109.7545725706777,
285
- "learning_rate": 3.36826313211205e-07,
286
- "logits/chosen": -4.862623691558838,
287
- "logits/rejected": -5.1108198165893555,
288
- "logps/chosen": -195.3279266357422,
289
- "logps/rejected": -170.62802124023438,
290
- "loss": 0.6483,
291
- "rewards/accuracies": 0.8187500238418579,
292
- "rewards/chosen": 2.5267701148986816,
293
- "rewards/margins": 7.295065402984619,
294
- "rewards/rejected": -4.768294811248779,
295
  "step": 170
296
  },
297
  {
298
- "epoch": 0.4743083003952569,
299
- "grad_norm": 1174.4955529247104,
300
- "learning_rate": 3.14890137195437e-07,
301
- "logits/chosen": -4.831478595733643,
302
- "logits/rejected": -5.037031173706055,
303
- "logps/chosen": -211.2603302001953,
304
- "logps/rejected": -190.32931518554688,
305
- "loss": 0.5829,
306
- "rewards/accuracies": 0.84375,
307
- "rewards/chosen": 1.8433873653411865,
308
- "rewards/margins": 7.003817558288574,
309
- "rewards/rejected": -5.160429954528809,
310
  "step": 180
311
  },
312
  {
313
- "epoch": 0.5006587615283268,
314
- "grad_norm": 1405.2874598551575,
315
- "learning_rate": 2.9240358139084013e-07,
316
- "logits/chosen": -4.9419050216674805,
317
- "logits/rejected": -5.200289726257324,
318
- "logps/chosen": -209.5860595703125,
319
- "logps/rejected": -188.95069885253906,
320
- "loss": 0.5485,
321
- "rewards/accuracies": 0.856249988079071,
322
- "rewards/chosen": 1.218836784362793,
323
- "rewards/margins": 8.241748809814453,
324
- "rewards/rejected": -7.022912502288818,
325
  "step": 190
326
  },
327
  {
328
- "epoch": 0.5270092226613966,
329
- "grad_norm": 1006.939051019727,
330
- "learning_rate": 2.695573704031885e-07,
331
- "logits/chosen": -4.7473955154418945,
332
- "logits/rejected": -5.000621795654297,
333
- "logps/chosen": -217.6959686279297,
334
- "logps/rejected": -190.3670196533203,
335
- "loss": 0.7239,
336
- "rewards/accuracies": 0.8374999761581421,
337
- "rewards/chosen": 3.8086190223693848,
338
- "rewards/margins": 6.961747646331787,
339
- "rewards/rejected": -3.1531288623809814,
340
  "step": 200
341
  },
342
  {
343
- "epoch": 0.5270092226613966,
344
- "eval_logits/chosen": -4.771502494812012,
345
- "eval_logits/rejected": -4.989596843719482,
346
- "eval_logps/chosen": -396.1126708984375,
347
- "eval_logps/rejected": -511.37469482421875,
348
- "eval_loss": 8.072031021118164,
349
- "eval_rewards/accuracies": 0.2734375,
350
- "eval_rewards/chosen": -2.8191022872924805,
351
- "eval_rewards/margins": -6.325590133666992,
352
- "eval_rewards/rejected": 3.5064878463745117,
353
- "eval_runtime": 97.9327,
354
- "eval_samples_per_second": 20.422,
355
- "eval_steps_per_second": 0.327,
356
  "step": 200
357
  },
358
  {
359
- "epoch": 0.5533596837944664,
360
- "grad_norm": 1103.8948285514384,
361
- "learning_rate": 2.465452793317865e-07,
362
- "logits/chosen": -4.7866034507751465,
363
- "logits/rejected": -5.081458568572998,
364
- "logps/chosen": -228.73922729492188,
365
- "logps/rejected": -201.91409301757812,
366
- "loss": 0.5681,
367
- "rewards/accuracies": 0.8687499761581421,
368
- "rewards/chosen": 2.7976126670837402,
369
- "rewards/margins": 7.8736419677734375,
370
- "rewards/rejected": -5.076028823852539,
371
  "step": 210
372
  },
373
  {
374
- "epoch": 0.5797101449275363,
375
- "grad_norm": 715.6592810091468,
376
- "learning_rate": 2.2356249022388789e-07,
377
- "logits/chosen": -4.758913516998291,
378
- "logits/rejected": -5.04398250579834,
379
- "logps/chosen": -205.92568969726562,
380
- "logps/rejected": -170.3586883544922,
381
- "loss": 0.4895,
382
- "rewards/accuracies": 0.856249988079071,
383
- "rewards/chosen": 4.249292373657227,
384
- "rewards/margins": 7.541260719299316,
385
- "rewards/rejected": -3.291968822479248,
386
  "step": 220
387
  },
388
  {
389
- "epoch": 0.6060606060606061,
390
- "grad_norm": 944.8747719910025,
391
- "learning_rate": 2.0080393659578038e-07,
392
- "logits/chosen": -4.764594078063965,
393
- "logits/rejected": -5.100175857543945,
394
- "logps/chosen": -210.0486297607422,
395
- "logps/rejected": -179.91928100585938,
396
- "loss": 0.5197,
397
- "rewards/accuracies": 0.8687499761581421,
398
- "rewards/chosen": 5.489472389221191,
399
- "rewards/margins": 8.828967094421387,
400
- "rewards/rejected": -3.3394947052001953,
401
  "step": 230
402
  },
403
  {
404
- "epoch": 0.6324110671936759,
405
- "grad_norm": 1267.6123006349214,
406
- "learning_rate": 1.7846265006183976e-07,
407
- "logits/chosen": -4.821089744567871,
408
- "logits/rejected": -5.0871124267578125,
409
- "logps/chosen": -211.7589874267578,
410
- "logps/rejected": -181.33819580078125,
411
- "loss": 0.5444,
412
- "rewards/accuracies": 0.824999988079071,
413
- "rewards/chosen": -0.23131565749645233,
414
- "rewards/margins": 6.660643577575684,
415
- "rewards/rejected": -6.891959190368652,
416
  "step": 240
417
  },
418
  {
419
- "epoch": 0.6587615283267457,
420
- "grad_norm": 911.8592996107284,
421
- "learning_rate": 1.5672812309497722e-07,
422
- "logits/chosen": -4.781493663787842,
423
- "logits/rejected": -5.052074432373047,
424
- "logps/chosen": -201.82290649414062,
425
- "logps/rejected": -175.06719970703125,
426
- "loss": 0.5887,
427
  "rewards/accuracies": 0.856249988079071,
428
- "rewards/chosen": 2.856235980987549,
429
- "rewards/margins": 8.613394737243652,
430
- "rewards/rejected": -5.757159233093262,
431
  "step": 250
432
  },
433
  {
434
- "epoch": 0.6851119894598156,
435
- "grad_norm": 840.8020636445922,
436
- "learning_rate": 1.357847018050843e-07,
437
- "logits/chosen": -4.674568176269531,
438
- "logits/rejected": -4.961749076843262,
439
- "logps/chosen": -232.63119506835938,
440
- "logps/rejected": -204.2981719970703,
441
- "loss": 0.6421,
442
- "rewards/accuracies": 0.8062499761581421,
443
- "rewards/chosen": 2.906367778778076,
444
- "rewards/margins": 7.688606262207031,
445
- "rewards/rejected": -4.782238483428955,
446
  "step": 260
447
  },
448
  {
449
- "epoch": 0.7114624505928854,
450
- "grad_norm": 787.2425305884327,
451
- "learning_rate": 1.1581002236747328e-07,
452
- "logits/chosen": -4.637770652770996,
453
- "logits/rejected": -4.975251197814941,
454
- "logps/chosen": -190.36253356933594,
455
- "logps/rejected": -166.21788024902344,
456
- "loss": 0.5113,
457
  "rewards/accuracies": 0.856249988079071,
458
- "rewards/chosen": 2.2253878116607666,
459
- "rewards/margins": 8.868097305297852,
460
- "rewards/rejected": -6.6427106857299805,
461
  "step": 270
462
  },
463
  {
464
- "epoch": 0.7378129117259552,
465
- "grad_norm": 1064.0700290859438,
466
- "learning_rate": 9.697350436308427e-08,
467
- "logits/chosen": -4.585692405700684,
468
- "logits/rejected": -4.871885776519775,
469
- "logps/chosen": -229.0143585205078,
470
- "logps/rejected": -194.08761596679688,
471
- "loss": 0.5252,
472
- "rewards/accuracies": 0.8374999761581421,
473
- "rewards/chosen": 3.0419421195983887,
474
- "rewards/margins": 8.473891258239746,
475
- "rewards/rejected": -5.431949138641357,
476
  "step": 280
477
  },
478
  {
479
- "epoch": 0.764163372859025,
480
- "grad_norm": 839.1911415542294,
481
- "learning_rate": 7.943491380952188e-08,
482
- "logits/chosen": -4.828024864196777,
483
- "logits/rejected": -5.060397148132324,
484
- "logps/chosen": -196.42611694335938,
485
- "logps/rejected": -171.01446533203125,
486
- "loss": 0.4822,
487
- "rewards/accuracies": 0.862500011920929,
488
- "rewards/chosen": 4.435191631317139,
489
- "rewards/margins": 8.396050453186035,
490
- "rewards/rejected": -3.9608588218688965,
491
  "step": 290
492
  },
493
  {
494
- "epoch": 0.7905138339920948,
495
- "grad_norm": 887.4606387846492,
496
- "learning_rate": 6.334300807088508e-08,
497
- "logits/chosen": -4.608688831329346,
498
- "logits/rejected": -4.9593987464904785,
499
- "logps/chosen": -195.76962280273438,
500
- "logps/rejected": -164.97048950195312,
501
- "loss": 0.5556,
502
- "rewards/accuracies": 0.893750011920929,
503
- "rewards/chosen": 3.875316619873047,
504
- "rewards/margins": 8.369561195373535,
505
- "rewards/rejected": -4.4942450523376465,
506
  "step": 300
507
  },
508
  {
509
- "epoch": 0.7905138339920948,
510
- "eval_logits/chosen": -4.660388469696045,
511
- "eval_logits/rejected": -4.890798091888428,
512
- "eval_logps/chosen": -399.5094909667969,
513
- "eval_logps/rejected": -517.530029296875,
514
- "eval_loss": 6.923000335693359,
515
- "eval_rewards/accuracies": 0.31640625,
516
- "eval_rewards/chosen": -4.517519474029541,
517
- "eval_rewards/margins": -4.946353435516357,
518
- "eval_rewards/rejected": 0.42883408069610596,
519
- "eval_runtime": 99.1917,
520
- "eval_samples_per_second": 20.163,
521
- "eval_steps_per_second": 0.323,
522
  "step": 300
523
  },
524
  {
525
- "epoch": 0.8168642951251647,
526
- "grad_norm": 1030.5057638496492,
527
- "learning_rate": 4.8834274139883084e-08,
528
- "logits/chosen": -4.663365840911865,
529
- "logits/rejected": -5.023074150085449,
530
- "logps/chosen": -201.54336547851562,
531
- "logps/rejected": -171.71324157714844,
532
- "loss": 0.533,
533
- "rewards/accuracies": 0.8812500238418579,
534
- "rewards/chosen": 4.074584007263184,
535
- "rewards/margins": 8.798391342163086,
536
- "rewards/rejected": -4.723808765411377,
537
  "step": 310
538
  },
539
  {
540
- "epoch": 0.8432147562582345,
541
- "grad_norm": 1279.040936993484,
542
- "learning_rate": 3.60317709937693e-08,
543
- "logits/chosen": -4.707262992858887,
544
- "logits/rejected": -5.011012077331543,
545
- "logps/chosen": -223.0199432373047,
546
- "logps/rejected": -183.51393127441406,
547
- "loss": 0.5239,
548
- "rewards/accuracies": 0.8187500238418579,
549
- "rewards/chosen": 3.1946961879730225,
550
- "rewards/margins": 7.443505764007568,
551
- "rewards/rejected": -4.248808860778809,
552
  "step": 320
553
  },
554
  {
555
- "epoch": 0.8695652173913043,
556
- "grad_norm": 790.9511672313881,
557
- "learning_rate": 2.5044085842905683e-08,
558
- "logits/chosen": -4.707150459289551,
559
- "logits/rejected": -4.967694282531738,
560
- "logps/chosen": -208.9776153564453,
561
- "logps/rejected": -184.1087188720703,
562
- "loss": 0.5674,
563
- "rewards/accuracies": 0.918749988079071,
564
- "rewards/chosen": 4.723270893096924,
565
- "rewards/margins": 9.567978858947754,
566
- "rewards/rejected": -4.844708442687988,
567
  "step": 330
568
  },
569
  {
570
- "epoch": 0.8959156785243741,
571
- "grad_norm": 1075.929120672392,
572
- "learning_rate": 1.5964413124758493e-08,
573
- "logits/chosen": -4.645999431610107,
574
- "logits/rejected": -4.947402477264404,
575
- "logps/chosen": -212.19015502929688,
576
- "logps/rejected": -185.76678466796875,
577
- "loss": 0.5578,
578
- "rewards/accuracies": 0.875,
579
- "rewards/chosen": 4.216076374053955,
580
- "rewards/margins": 8.17945671081543,
581
- "rewards/rejected": -3.9633796215057373,
582
  "step": 340
583
  },
584
  {
585
- "epoch": 0.922266139657444,
586
- "grad_norm": 1055.0113319220882,
587
- "learning_rate": 8.869764055041501e-09,
588
- "logits/chosen": -4.714905738830566,
589
- "logits/rejected": -4.937991619110107,
590
- "logps/chosen": -215.5680389404297,
591
- "logps/rejected": -200.6318359375,
592
- "loss": 0.5113,
593
- "rewards/accuracies": 0.793749988079071,
594
- "rewards/chosen": 2.9399425983428955,
595
- "rewards/margins": 7.131015777587891,
596
- "rewards/rejected": -4.191073417663574,
597
- "step": 350
598
- },
599
- {
600
- "epoch": 0.9486166007905138,
601
- "grad_norm": 1003.3923870155849,
602
- "learning_rate": 3.82031344036729e-09,
603
- "logits/chosen": -4.614500999450684,
604
- "logits/rejected": -4.9195661544799805,
605
- "logps/chosen": -216.197998046875,
606
- "logps/rejected": -186.8596649169922,
607
- "loss": 0.4994,
608
- "rewards/accuracies": 0.8187500238418579,
609
- "rewards/chosen": 2.650672197341919,
610
- "rewards/margins": 6.149393081665039,
611
- "rewards/rejected": -3.49872088432312,
612
- "step": 360
613
- },
614
- {
615
- "epoch": 0.9749670619235836,
616
- "grad_norm": 1021.842139855702,
617
- "learning_rate": 8.588892925590063e-10,
618
- "logits/chosen": -4.69917631149292,
619
- "logits/rejected": -5.054296016693115,
620
- "logps/chosen": -212.79714965820312,
621
- "logps/rejected": -171.7442169189453,
622
- "loss": 0.5535,
623
- "rewards/accuracies": 0.8500000238418579,
624
- "rewards/chosen": 3.2063796520233154,
625
- "rewards/margins": 8.728368759155273,
626
- "rewards/rejected": -5.521987438201904,
627
- "step": 370
628
- },
629
- {
630
- "epoch": 0.9986824769433466,
631
- "step": 379,
632
  "total_flos": 0.0,
633
- "train_loss": 0.5517442987587962,
634
- "train_runtime": 6181.8185,
635
- "train_samples_per_second": 7.85,
636
- "train_steps_per_second": 0.061
637
  }
638
  ],
639
  "logging_steps": 10,
640
- "max_steps": 379,
641
  "num_input_tokens_seen": 0,
642
  "num_train_epochs": 1,
643
  "save_steps": 100,
644
- "stateful_callbacks": {
645
- "TrainerControl": {
646
- "args": {
647
- "should_epoch_stop": false,
648
- "should_evaluate": false,
649
- "should_log": false,
650
- "should_save": true,
651
- "should_training_stop": false
652
- },
653
- "attributes": {}
654
- }
655
- },
656
  "total_flos": 0.0,
657
  "train_batch_size": 8,
658
  "trial_name": null,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9985693848354793,
5
  "eval_steps": 100,
6
+ "global_step": 349,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.0,
13
+ "grad_norm": 1482.8898632632786,
14
+ "learning_rate": 1.4285714285714284e-09,
15
+ "logits/chosen": -4.490396976470947,
16
+ "logits/rejected": -4.787891387939453,
17
+ "logps/chosen": -300.56573486328125,
18
+ "logps/rejected": -263.39849853515625,
19
  "loss": 0.6931,
20
  "rewards/accuracies": 0.0,
21
  "rewards/chosen": 0.0,
 
24
  "step": 1
25
  },
26
  {
27
+ "epoch": 0.03,
28
+ "grad_norm": 1626.423546399318,
29
+ "learning_rate": 1.4285714285714284e-08,
30
+ "logits/chosen": -4.29224967956543,
31
+ "logits/rejected": -4.469963550567627,
32
+ "logps/chosen": -285.7841796875,
33
+ "logps/rejected": -241.37472534179688,
34
+ "loss": 0.7238,
35
+ "rewards/accuracies": 0.4097222089767456,
36
+ "rewards/chosen": 0.00654969597235322,
37
+ "rewards/margins": -0.022695984691381454,
38
+ "rewards/rejected": 0.029245682060718536,
39
  "step": 10
40
  },
41
  {
42
+ "epoch": 0.06,
43
+ "grad_norm": 1614.6738239528886,
44
+ "learning_rate": 2.857142857142857e-08,
45
+ "logits/chosen": -4.210062026977539,
46
+ "logits/rejected": -4.502069473266602,
47
+ "logps/chosen": -312.73968505859375,
48
+ "logps/rejected": -247.76315307617188,
49
+ "loss": 0.7059,
50
+ "rewards/accuracies": 0.4625000059604645,
51
+ "rewards/chosen": 0.01807965151965618,
52
+ "rewards/margins": 0.015493685379624367,
53
+ "rewards/rejected": 0.0025859654415398836,
54
  "step": 20
55
  },
56
  {
57
+ "epoch": 0.09,
58
+ "grad_norm": 1444.832849324016,
59
+ "learning_rate": 4.285714285714285e-08,
60
+ "logits/chosen": -4.140606880187988,
61
+ "logits/rejected": -4.359016418457031,
62
+ "logps/chosen": -323.00701904296875,
63
+ "logps/rejected": -274.5583801269531,
64
+ "loss": 0.6754,
65
+ "rewards/accuracies": 0.6812499761581421,
66
+ "rewards/chosen": 0.26756447553634644,
67
+ "rewards/margins": 0.19910377264022827,
68
+ "rewards/rejected": 0.06846068799495697,
69
  "step": 30
70
  },
71
  {
72
+ "epoch": 0.11,
73
+ "grad_norm": 1322.1199574632033,
74
+ "learning_rate": 4.9968724814144246e-08,
75
+ "logits/chosen": -4.296597480773926,
76
+ "logits/rejected": -4.558178901672363,
77
+ "logps/chosen": -277.67041015625,
78
+ "logps/rejected": -227.9638214111328,
79
+ "loss": 0.5887,
80
+ "rewards/accuracies": 0.731249988079071,
81
+ "rewards/chosen": 0.46597638726234436,
82
+ "rewards/margins": 0.30959779024124146,
83
+ "rewards/rejected": 0.1563786268234253,
84
  "step": 40
85
  },
86
  {
87
+ "epoch": 0.14,
88
+ "grad_norm": 1226.7969075975607,
89
+ "learning_rate": 4.971899263739325e-08,
90
+ "logits/chosen": -4.339926719665527,
91
+ "logits/rejected": -4.592724323272705,
92
+ "logps/chosen": -270.9559020996094,
93
+ "logps/rejected": -228.2990264892578,
94
+ "loss": 0.5099,
95
+ "rewards/accuracies": 0.7437499761581421,
96
+ "rewards/chosen": 0.9494625329971313,
97
+ "rewards/margins": 0.5203765034675598,
98
+ "rewards/rejected": 0.4290861189365387,
99
  "step": 50
100
  },
101
  {
102
+ "epoch": 0.17,
103
+ "grad_norm": 929.2104534934765,
104
+ "learning_rate": 4.9222026055025726e-08,
105
+ "logits/chosen": -4.333067893981934,
106
+ "logits/rejected": -4.564365386962891,
107
+ "logps/chosen": -303.43310546875,
108
+ "logps/rejected": -252.1361846923828,
109
+ "loss": 0.4317,
110
+ "rewards/accuracies": 0.762499988079071,
111
+ "rewards/chosen": 1.6878736019134521,
112
+ "rewards/margins": 1.0692347288131714,
113
+ "rewards/rejected": 0.6186389923095703,
114
  "step": 60
115
  },
116
  {
117
+ "epoch": 0.2,
118
+ "grad_norm": 1085.9024365118285,
119
+ "learning_rate": 4.8482795627104736e-08,
120
+ "logits/chosen": -4.332296371459961,
121
+ "logits/rejected": -4.567011833190918,
122
+ "logps/chosen": -289.8699645996094,
123
+ "logps/rejected": -241.077880859375,
124
+ "loss": 0.4089,
125
+ "rewards/accuracies": 0.862500011920929,
126
+ "rewards/chosen": 1.8221668004989624,
127
+ "rewards/margins": 1.204461693763733,
128
+ "rewards/rejected": 0.6177049875259399,
129
  "step": 70
130
  },
131
  {
132
+ "epoch": 0.23,
133
+ "grad_norm": 978.9007166471121,
134
+ "learning_rate": 4.750869498807735e-08,
135
+ "logits/chosen": -4.28688383102417,
136
+ "logits/rejected": -4.543013572692871,
137
+ "logps/chosen": -322.56048583984375,
138
+ "logps/rejected": -271.0233154296875,
139
+ "loss": 0.3625,
140
+ "rewards/accuracies": 0.875,
141
+ "rewards/chosen": 2.1792995929718018,
142
+ "rewards/margins": 1.4446518421173096,
143
+ "rewards/rejected": 0.7346473932266235,
144
  "step": 80
145
  },
146
  {
147
+ "epoch": 0.26,
148
+ "grad_norm": 755.2675019751182,
149
+ "learning_rate": 4.630946689712609e-08,
150
+ "logits/chosen": -4.393240928649902,
151
+ "logits/rejected": -4.636073112487793,
152
+ "logps/chosen": -283.79290771484375,
153
+ "logps/rejected": -234.5099334716797,
154
+ "loss": 0.3436,
155
+ "rewards/accuracies": 0.862500011920929,
156
+ "rewards/chosen": 2.397665500640869,
157
+ "rewards/margins": 1.638594388961792,
158
+ "rewards/rejected": 0.7590711116790771,
159
  "step": 90
160
  },
161
  {
162
+ "epoch": 0.29,
163
+ "grad_norm": 893.2972482207422,
164
+ "learning_rate": 4.4897105793046024e-08,
165
+ "logits/chosen": -4.302220344543457,
166
+ "logits/rejected": -4.582569122314453,
167
+ "logps/chosen": -270.37457275390625,
168
+ "logps/rejected": -229.5946502685547,
169
+ "loss": 0.3652,
170
  "rewards/accuracies": 0.8374999761581421,
171
+ "rewards/chosen": 2.673680067062378,
172
+ "rewards/margins": 2.020587921142578,
173
+ "rewards/rejected": 0.6530919075012207,
174
  "step": 100
175
  },
176
  {
177
+ "epoch": 0.29,
178
+ "eval_logits/chosen": -4.696198463439941,
179
+ "eval_logits/rejected": -4.924900054931641,
180
+ "eval_logps/chosen": -403.4751892089844,
181
+ "eval_logps/rejected": -519.0816650390625,
182
+ "eval_loss": 1.7488452196121216,
183
+ "eval_rewards/accuracies": 0.3515625,
184
+ "eval_rewards/chosen": -2.2159295082092285,
185
+ "eval_rewards/margins": -1.206247329711914,
186
+ "eval_rewards/rejected": -1.009682059288025,
187
+ "eval_runtime": 97.4797,
188
+ "eval_samples_per_second": 20.517,
189
+ "eval_steps_per_second": 0.328,
190
  "step": 100
191
  },
192
  {
193
+ "epoch": 0.31,
194
+ "grad_norm": 872.5882367590035,
195
+ "learning_rate": 4.328573782827409e-08,
196
+ "logits/chosen": -4.3329758644104,
197
+ "logits/rejected": -4.617272853851318,
198
+ "logps/chosen": -296.02618408203125,
199
+ "logps/rejected": -242.73428344726562,
200
+ "loss": 0.349,
201
+ "rewards/accuracies": 0.875,
202
+ "rewards/chosen": 2.7564496994018555,
203
+ "rewards/margins": 2.076282024383545,
204
+ "rewards/rejected": 0.6801677942276001,
205
  "step": 110
206
  },
207
  {
208
+ "epoch": 0.34,
209
+ "grad_norm": 1043.3393138533181,
210
+ "learning_rate": 4.1491479581946166e-08,
211
+ "logits/chosen": -4.3465352058410645,
212
+ "logits/rejected": -4.575117111206055,
213
+ "logps/chosen": -300.99859619140625,
214
+ "logps/rejected": -265.1387023925781,
215
+ "loss": 0.3567,
216
+ "rewards/accuracies": 0.8500000238418579,
217
+ "rewards/chosen": 2.6209876537323,
218
+ "rewards/margins": 1.951716423034668,
219
+ "rewards/rejected": 0.6692714095115662,
220
  "step": 120
221
  },
222
  {
223
+ "epoch": 0.37,
224
+ "grad_norm": 837.2392359904699,
225
+ "learning_rate": 3.953227686510564e-08,
226
+ "logits/chosen": -4.319238185882568,
227
+ "logits/rejected": -4.565236568450928,
228
+ "logps/chosen": -288.36688232421875,
229
+ "logps/rejected": -240.29177856445312,
230
+ "loss": 0.3089,
231
+ "rewards/accuracies": 0.887499988079071,
232
+ "rewards/chosen": 2.86527419090271,
233
+ "rewards/margins": 2.168524980545044,
234
+ "rewards/rejected": 0.6967490911483765,
235
  "step": 130
236
  },
237
  {
238
+ "epoch": 0.4,
239
+ "grad_norm": 1077.8316620275032,
240
+ "learning_rate": 3.7427725230301354e-08,
241
+ "logits/chosen": -4.310965538024902,
242
+ "logits/rejected": -4.550887584686279,
243
+ "logps/chosen": -295.83868408203125,
244
+ "logps/rejected": -240.91311645507812,
245
+ "loss": 0.3113,
246
+ "rewards/accuracies": 0.8812500238418579,
247
+ "rewards/chosen": 3.000100612640381,
248
+ "rewards/margins": 2.261690855026245,
249
+ "rewards/rejected": 0.7384099960327148,
250
  "step": 140
251
  },
252
  {
253
+ "epoch": 0.43,
254
+ "grad_norm": 805.166891537404,
255
+ "learning_rate": 3.5198873980801955e-08,
256
+ "logits/chosen": -4.3193488121032715,
257
+ "logits/rejected": -4.625092506408691,
258
+ "logps/chosen": -294.2840881347656,
259
+ "logps/rejected": -245.18655395507812,
260
+ "loss": 0.318,
261
+ "rewards/accuracies": 0.8374999761581421,
262
+ "rewards/chosen": 3.1946704387664795,
263
+ "rewards/margins": 2.1761879920959473,
264
+ "rewards/rejected": 1.0184824466705322,
265
  "step": 150
266
  },
267
  {
268
+ "epoch": 0.46,
269
+ "grad_norm": 711.9582620759091,
270
+ "learning_rate": 3.2868015639687205e-08,
271
+ "logits/chosen": -4.259413242340088,
272
+ "logits/rejected": -4.450949668884277,
273
+ "logps/chosen": -311.91326904296875,
274
+ "logps/rejected": -270.4508972167969,
275
+ "loss": 0.3344,
276
+ "rewards/accuracies": 0.8812500238418579,
277
+ "rewards/chosen": 3.2649455070495605,
278
+ "rewards/margins": 2.298511266708374,
279
+ "rewards/rejected": 0.966434121131897,
280
  "step": 160
281
  },
282
  {
283
+ "epoch": 0.49,
284
+ "grad_norm": 894.588925085696,
285
+ "learning_rate": 3.0458462984504134e-08,
286
+ "logits/chosen": -4.303664207458496,
287
+ "logits/rejected": -4.500936031341553,
288
+ "logps/chosen": -274.3706359863281,
289
+ "logps/rejected": -235.48678588867188,
290
+ "loss": 0.2963,
291
+ "rewards/accuracies": 0.856249988079071,
292
+ "rewards/chosen": 3.1083619594573975,
293
+ "rewards/margins": 2.440159559249878,
294
+ "rewards/rejected": 0.6682023406028748,
295
  "step": 170
296
  },
297
  {
298
+ "epoch": 0.52,
299
+ "grad_norm": 787.4594621489149,
300
+ "learning_rate": 2.7994315877542628e-08,
301
+ "logits/chosen": -4.386083602905273,
302
+ "logits/rejected": -4.545100212097168,
303
+ "logps/chosen": -263.3323059082031,
304
+ "logps/rejected": -228.91329956054688,
305
+ "loss": 0.329,
306
+ "rewards/accuracies": 0.8500000238418579,
307
+ "rewards/chosen": 2.6448895931243896,
308
+ "rewards/margins": 2.054766893386841,
309
+ "rewards/rejected": 0.5901231169700623,
310
  "step": 180
311
  },
312
  {
313
+ "epoch": 0.54,
314
+ "grad_norm": 1024.0437070924656,
315
+ "learning_rate": 2.5500220223847356e-08,
316
+ "logits/chosen": -4.346238613128662,
317
+ "logits/rejected": -4.559948444366455,
318
+ "logps/chosen": -290.6450500488281,
319
+ "logps/rejected": -252.0496063232422,
320
+ "loss": 0.3385,
321
+ "rewards/accuracies": 0.8374999761581421,
322
+ "rewards/chosen": 3.277946949005127,
323
+ "rewards/margins": 2.545849323272705,
324
+ "rewards/rejected": 0.7320975661277771,
325
  "step": 190
326
  },
327
  {
328
+ "epoch": 0.57,
329
+ "grad_norm": 937.2977444102543,
330
+ "learning_rate": 2.3001121467819626e-08,
331
+ "logits/chosen": -4.290076732635498,
332
+ "logits/rejected": -4.555208683013916,
333
+ "logps/chosen": -317.6783447265625,
334
+ "logps/rejected": -267.9818115234375,
335
+ "loss": 0.312,
336
+ "rewards/accuracies": 0.824999988079071,
337
+ "rewards/chosen": 3.4544365406036377,
338
+ "rewards/margins": 2.3504996299743652,
339
+ "rewards/rejected": 1.1039369106292725,
340
  "step": 200
341
  },
342
  {
343
+ "epoch": 0.57,
344
+ "eval_logits/chosen": -4.7096052169799805,
345
+ "eval_logits/rejected": -4.93910026550293,
346
+ "eval_logps/chosen": -405.4331970214844,
347
+ "eval_logps/rejected": -520.6950073242188,
348
+ "eval_loss": 1.959585189819336,
349
+ "eval_rewards/accuracies": 0.33984375,
350
+ "eval_rewards/chosen": -3.194929599761963,
351
+ "eval_rewards/margins": -1.3785794973373413,
352
+ "eval_rewards/rejected": -1.8163501024246216,
353
+ "eval_runtime": 97.467,
354
+ "eval_samples_per_second": 20.52,
355
+ "eval_steps_per_second": 0.328,
356
  "step": 200
357
  },
358
  {
359
+ "epoch": 0.6,
360
+ "grad_norm": 839.0813036142636,
361
+ "learning_rate": 2.0522015093886612e-08,
362
+ "logits/chosen": -4.317226409912109,
363
+ "logits/rejected": -4.618459224700928,
364
+ "logps/chosen": -294.03253173828125,
365
+ "logps/rejected": -234.196533203125,
366
+ "loss": 0.2938,
367
+ "rewards/accuracies": 0.8500000238418579,
368
+ "rewards/chosen": 3.499882459640503,
369
+ "rewards/margins": 2.7310853004455566,
370
+ "rewards/rejected": 0.7687975168228149,
371
  "step": 210
372
  },
373
  {
374
+ "epoch": 0.63,
375
+ "grad_norm": 996.0408528817461,
376
+ "learning_rate": 1.808769662668035e-08,
377
+ "logits/chosen": -4.358494758605957,
378
+ "logits/rejected": -4.704705238342285,
379
+ "logps/chosen": -299.7410888671875,
380
+ "logps/rejected": -235.092041015625,
381
+ "loss": 0.2943,
382
+ "rewards/accuracies": 0.8999999761581421,
383
+ "rewards/chosen": 3.0801563262939453,
384
+ "rewards/margins": 2.7920236587524414,
385
+ "rewards/rejected": 0.28813308477401733,
386
  "step": 220
387
  },
388
  {
389
+ "epoch": 0.66,
390
+ "grad_norm": 781.9269603792492,
391
+ "learning_rate": 1.5722513631174444e-08,
392
+ "logits/chosen": -4.392434597015381,
393
+ "logits/rejected": -4.748046398162842,
394
+ "logps/chosen": -289.19927978515625,
395
+ "logps/rejected": -234.593017578125,
396
+ "loss": 0.3084,
397
+ "rewards/accuracies": 0.856249988079071,
398
+ "rewards/chosen": 2.994288921356201,
399
+ "rewards/margins": 2.508836507797241,
400
+ "rewards/rejected": 0.4854525625705719,
401
  "step": 230
402
  },
403
  {
404
+ "epoch": 0.69,
405
+ "grad_norm": 795.1407214961006,
406
+ "learning_rate": 1.345012219322345e-08,
407
+ "logits/chosen": -4.238420486450195,
408
+ "logits/rejected": -4.548759460449219,
409
+ "logps/chosen": -285.19232177734375,
410
+ "logps/rejected": -240.72509765625,
411
+ "loss": 0.3269,
412
+ "rewards/accuracies": 0.8687499761581421,
413
+ "rewards/chosen": 2.8941564559936523,
414
+ "rewards/margins": 2.3365750312805176,
415
+ "rewards/rejected": 0.5575811266899109,
416
  "step": 240
417
  },
418
  {
419
+ "epoch": 0.72,
420
+ "grad_norm": 879.951904317175,
421
+ "learning_rate": 1.1293250316137664e-08,
422
+ "logits/chosen": -4.293431282043457,
423
+ "logits/rejected": -4.5221476554870605,
424
+ "logps/chosen": -294.2128601074219,
425
+ "logps/rejected": -254.7281494140625,
426
+ "loss": 0.3131,
427
  "rewards/accuracies": 0.856249988079071,
428
+ "rewards/chosen": 3.089404821395874,
429
+ "rewards/margins": 2.4765713214874268,
430
+ "rewards/rejected": 0.6128337383270264,
431
  "step": 250
432
  },
433
  {
434
+ "epoch": 0.74,
435
+ "grad_norm": 1094.0422336259135,
436
+ "learning_rate": 9.273470599753375e-09,
437
+ "logits/chosen": -4.372658729553223,
438
+ "logits/rejected": -4.525150775909424,
439
+ "logps/chosen": -294.89874267578125,
440
+ "logps/rejected": -263.60577392578125,
441
+ "loss": 0.3409,
442
+ "rewards/accuracies": 0.831250011920929,
443
+ "rewards/chosen": 2.7965197563171387,
444
+ "rewards/margins": 2.07285737991333,
445
+ "rewards/rejected": 0.7236624956130981,
446
  "step": 260
447
  },
448
  {
449
+ "epoch": 0.77,
450
+ "grad_norm": 775.0184329679462,
451
+ "learning_rate": 7.410984475616819e-09,
452
+ "logits/chosen": -4.244694709777832,
453
+ "logits/rejected": -4.5279622077941895,
454
+ "logps/chosen": -282.0103454589844,
455
+ "logps/rejected": -239.31399536132812,
456
+ "loss": 0.2834,
457
  "rewards/accuracies": 0.856249988079071,
458
+ "rewards/chosen": 3.1753017902374268,
459
+ "rewards/margins": 2.7872982025146484,
460
+ "rewards/rejected": 0.38800328969955444,
461
  "step": 270
462
  },
463
  {
464
+ "epoch": 0.8,
465
+ "grad_norm": 796.3667611732337,
466
+ "learning_rate": 5.724420156318405e-09,
467
+ "logits/chosen": -4.291975498199463,
468
+ "logits/rejected": -4.621099472045898,
469
+ "logps/chosen": -285.0769958496094,
470
+ "logps/rejected": -232.0653839111328,
471
+ "loss": 0.3144,
472
+ "rewards/accuracies": 0.8687499761581421,
473
+ "rewards/chosen": 2.847846508026123,
474
+ "rewards/margins": 2.3038220405578613,
475
+ "rewards/rejected": 0.5440241098403931,
476
  "step": 280
477
  },
478
  {
479
+ "epoch": 0.83,
480
+ "grad_norm": 1101.8200803185337,
481
+ "learning_rate": 4.230646319847259e-09,
482
+ "logits/chosen": -4.424475193023682,
483
+ "logits/rejected": -4.705140113830566,
484
+ "logps/chosen": -295.53857421875,
485
+ "logps/rejected": -232.05245971679688,
486
+ "loss": 0.3121,
487
+ "rewards/accuracies": 0.893750011920929,
488
+ "rewards/chosen": 2.7051048278808594,
489
+ "rewards/margins": 2.4193003177642822,
490
+ "rewards/rejected": 0.2858046591281891,
491
  "step": 290
492
  },
493
  {
494
+ "epoch": 0.86,
495
+ "grad_norm": 1165.1379404537536,
496
+ "learning_rate": 2.944603392457931e-09,
497
+ "logits/chosen": -4.335474491119385,
498
+ "logits/rejected": -4.49271297454834,
499
+ "logps/chosen": -292.6704406738281,
500
+ "logps/rejected": -252.3407440185547,
501
+ "loss": 0.2993,
502
+ "rewards/accuracies": 0.831250011920929,
503
+ "rewards/chosen": 3.1620662212371826,
504
+ "rewards/margins": 2.2422008514404297,
505
+ "rewards/rejected": 0.9198653101921082,
506
  "step": 300
507
  },
508
  {
509
+ "epoch": 0.86,
510
+ "eval_logits/chosen": -4.706819534301758,
511
+ "eval_logits/rejected": -4.936363697052002,
512
+ "eval_logps/chosen": -405.887939453125,
513
+ "eval_logps/rejected": -521.2874755859375,
514
+ "eval_loss": 1.9827619791030884,
515
+ "eval_rewards/accuracies": 0.35546875,
516
+ "eval_rewards/chosen": -3.4222922325134277,
517
+ "eval_rewards/margins": -1.3097174167633057,
518
+ "eval_rewards/rejected": -2.112574815750122,
519
+ "eval_runtime": 97.4214,
520
+ "eval_samples_per_second": 20.529,
521
+ "eval_steps_per_second": 0.328,
522
  "step": 300
523
  },
524
  {
525
+ "epoch": 0.89,
526
+ "grad_norm": 1000.5341122694366,
527
+ "learning_rate": 1.8791541175240787e-09,
528
+ "logits/chosen": -4.3908562660217285,
529
+ "logits/rejected": -4.607417583465576,
530
+ "logps/chosen": -301.6288146972656,
531
+ "logps/rejected": -246.2473907470703,
532
+ "loss": 0.2942,
533
+ "rewards/accuracies": 0.862500011920929,
534
+ "rewards/chosen": 3.3309147357940674,
535
+ "rewards/margins": 2.577249765396118,
536
+ "rewards/rejected": 0.753665030002594,
537
  "step": 310
538
  },
539
  {
540
+ "epoch": 0.92,
541
+ "grad_norm": 942.4369602130394,
542
+ "learning_rate": 1.0449549049596136e-09,
543
+ "logits/chosen": -4.230744361877441,
544
+ "logits/rejected": -4.579751968383789,
545
+ "logps/chosen": -287.4718322753906,
546
+ "logps/rejected": -232.3734893798828,
547
+ "loss": 0.3078,
548
+ "rewards/accuracies": 0.862500011920929,
549
+ "rewards/chosen": 3.2846877574920654,
550
+ "rewards/margins": 2.4689810276031494,
551
+ "rewards/rejected": 0.8157066106796265,
552
  "step": 320
553
  },
554
  {
555
+ "epoch": 0.94,
556
+ "grad_norm": 1041.373917696444,
557
+ "learning_rate": 4.5034924794443707e-10,
558
+ "logits/chosen": -4.3931074142456055,
559
+ "logits/rejected": -4.620477199554443,
560
+ "logps/chosen": -280.0606994628906,
561
+ "logps/rejected": -233.14346313476562,
562
+ "loss": 0.2917,
563
+ "rewards/accuracies": 0.856249988079071,
564
+ "rewards/chosen": 3.2919604778289795,
565
+ "rewards/margins": 2.5820116996765137,
566
+ "rewards/rejected": 0.7099487781524658,
567
  "step": 330
568
  },
569
  {
570
+ "epoch": 0.97,
571
+ "grad_norm": 859.9709811153568,
572
+ "learning_rate": 1.0128427297940723e-10,
573
+ "logits/chosen": -4.328027248382568,
574
+ "logits/rejected": -4.5136332511901855,
575
+ "logps/chosen": -284.2748718261719,
576
+ "logps/rejected": -244.03817749023438,
577
+ "loss": 0.2952,
578
+ "rewards/accuracies": 0.862500011920929,
579
+ "rewards/chosen": 3.204352855682373,
580
+ "rewards/margins": 2.4567666053771973,
581
+ "rewards/rejected": 0.7475861310958862,
582
  "step": 340
583
  },
584
  {
585
+ "epoch": 1.0,
586
+ "step": 349,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
  "total_flos": 0.0,
588
+ "train_loss": 0.3696309270011661,
589
+ "train_runtime": 5591.2149,
590
+ "train_samples_per_second": 7.991,
591
+ "train_steps_per_second": 0.062
592
  }
593
  ],
594
  "logging_steps": 10,
595
+ "max_steps": 349,
596
  "num_input_tokens_seen": 0,
597
  "num_train_epochs": 1,
598
  "save_steps": 100,
 
 
 
 
 
 
 
 
 
 
 
 
599
  "total_flos": 0.0,
600
  "train_batch_size": 8,
601
  "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:df9dd9975041330cb3031003f27f8952f7657e67478afc48ee43616047d272ea
3
- size 6520
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadd16e859cce8223cbc036d0b14a858573a3c16794cd0290547847afa9ddb52
3
+ size 6264