BraylonDash committed
Commit 5a1daf8
Parent(s): 2bba83a

Model save
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: microsoft/phi-2
+ model-index:
+ - name: phi-2-gpo-test-longest-iter-v1-2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # phi-2-gpo-test-longest-iter-v1-2
+
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0016
+ - Rewards/chosen: -0.0008
+ - Rewards/rejected: -0.0008
+ - Rewards/accuracies: 0.4910
+ - Rewards/margins: 0.0001
+ - Logps/rejected: -278.6518
+ - Logps/chosen: -306.3463
+ - Logits/rejected: 0.0888
+ - Logits/chosen: -0.0087
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 4
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.0011 | 1.6 | 100 | 0.0016 | -0.0007 | -0.0008 | 0.4960 | 0.0001 | -278.6544 | -306.3417 | 0.0909 | -0.0073 |
+ | 0.0011 | 3.2 | 200 | 0.0016 | -0.0001 | -0.0005 | 0.4925 | 0.0003 | -278.6177 | -306.2834 | 0.0921 | -0.0047 |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
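
How to use (a minimal sketch): the card above leaves usage empty, so the snippet below shows one way to load this LoRA adapter on top of the base model with the PEFT and Transformers versions listed. The repo id `BraylonDash/phi-2-gpo-test-longest-iter-v1-2` is an assumption inferred from the committer and the card's model name, not confirmed by the commit.

```python
# Sketch: load the phi-2 base model and attach this commit's LoRA adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

adapter_id = "BraylonDash/phi-2-gpo-test-longest-iter-v1-2"  # assumed repo id

# trust_remote_code was still required for phi-2 on Transformers 4.36.x
base = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
model = PeftModel.from_pretrained(base, adapter_id)
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

inputs = tokenizer("Instruct: Say hello.\nOutput:", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```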
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/phi-2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "dense",
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
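
The adapter config above maps one-to-one onto PEFT's `LoraConfig`; for reference, here is a sketch of the equivalent object, setting only the non-default fields from the JSON:

```python
# Sketch: peft.LoraConfig mirroring adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    r=32,                  # LoRA rank ("r" above)
    lora_alpha=16,         # effective scaling is lora_alpha / r = 0.5
    lora_dropout=0.05,
    bias="none",
    target_modules=["k_proj", "dense", "v_proj", "q_proj"],
    task_type="CAUSAL_LM",
)
```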
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de997ddbd4bcb3150c8d8c0cd2ad29c4a7509d92756c4cbe350e451e4381a94a
+ size 41977616
added_tokens.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "\t\t": 50294,
+   "\t\t\t": 50293,
+   "\t\t\t\t": 50292,
+   "\t\t\t\t\t": 50291,
+   "\t\t\t\t\t\t": 50290,
+   "\t\t\t\t\t\t\t": 50289,
+   "\t\t\t\t\t\t\t\t": 50288,
+   "\t\t\t\t\t\t\t\t\t": 50287,
+   "  ": 50286,
+   "   ": 50285,
+   "    ": 50284,
+   "     ": 50283,
+   "      ": 50282,
+   "       ": 50281,
+   "        ": 50280,
+   "         ": 50279,
+   "          ": 50278,
+   "           ": 50277,
+   "            ": 50276,
+   "             ": 50275,
+   "              ": 50274,
+   "               ": 50273,
+   "                ": 50272,
+   "                 ": 50271,
+   "                  ": 50270,
+   "                   ": 50269,
+   "                    ": 50268,
+   "                     ": 50267,
+   "                      ": 50266,
+   "                       ": 50265,
+   "                        ": 50264,
+   "                         ": 50263,
+   "                          ": 50262,
+   "                           ": 50261,
+   "                            ": 50260,
+   "                             ": 50259,
+   "                              ": 50258,
+   "                               ": 50257
+ }
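
These 38 entries appear to be the CodeGen-style whitespace "fill" tokens that phi-2 inherits: runs of 2-9 tabs (ids 50294 down to 50287) and runs of 2-31 spaces (ids 50286 down to 50257), which encode code indentation in a single token. A quick sanity check, assuming the file is saved locally as `added_tokens.json`:

```python
# Sketch: confirm the added tokens are contiguous whitespace runs.
import json

with open("added_tokens.json") as f:
    added = json.load(f)

assert len(added) == 38
assert sorted(added.values()) == list(range(50257, 50295))
assert all(set(tok) <= {" ", "\t"} for tok in added)
print("all added tokens are tab/space runs with ids 50257-50294")
```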
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.00874541699886322,
+   "eval_logits/rejected": 0.0888209417462349,
+   "eval_logps/chosen": -306.3463439941406,
+   "eval_logps/rejected": -278.6518249511719,
+   "eval_loss": 0.00159652519505471,
+   "eval_rewards/accuracies": 0.4909999966621399,
+   "eval_rewards/chosen": -0.0007702131988480687,
+   "eval_rewards/margins": 5.095174856251106e-05,
+   "eval_rewards/rejected": -0.0008211650419980288,
+   "eval_runtime": 2003.1713,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 0.998,
+   "eval_steps_per_second": 0.25,
+   "train_loss": 0.0010954971686230911,
+   "train_runtime": 8408.6931,
+   "train_samples": 61135,
+   "train_samples_per_second": 0.476,
+   "train_steps_per_second": 0.029
+ }
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.00874541699886322,
+   "eval_logits/rejected": 0.0888209417462349,
+   "eval_logps/chosen": -306.3463439941406,
+   "eval_logps/rejected": -278.6518249511719,
+   "eval_loss": 0.00159652519505471,
+   "eval_rewards/accuracies": 0.4909999966621399,
+   "eval_rewards/chosen": -0.0007702131988480687,
+   "eval_rewards/margins": 5.095174856251106e-05,
+   "eval_rewards/rejected": -0.0008211650419980288,
+   "eval_runtime": 2003.1713,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 0.998,
+   "eval_steps_per_second": 0.25
+ }
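
A consistency check on these metrics: in TRL's DPO evaluation, `eval_rewards/margins` is the mean of chosen minus rejected rewards, and the numbers above agree up to float32 rounding:

```python
# Sketch: margins should equal chosen minus rejected (float32 rounding aside).
chosen = -0.0007702131988480687
rejected = -0.0008211650419980288
margin = 5.095174856251106e-05

assert abs((chosen - rejected) - margin) < 1e-9
print(chosen - rejected)  # ~5.0952e-05
```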
merges.txt ADDED
The diff for this file is too large to render. See raw diff
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,325 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50257": {
+       "content": "                               ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50258": {
+       "content": "                              ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50259": {
+       "content": "                             ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50260": {
+       "content": "                            ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50261": {
+       "content": "                           ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50262": {
+       "content": "                          ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50263": {
+       "content": "                         ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50264": {
+       "content": "                        ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50265": {
+       "content": "                       ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50266": {
+       "content": "                      ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50267": {
+       "content": "                     ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50268": {
+       "content": "                    ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50269": {
+       "content": "                   ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50270": {
+       "content": "                  ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50271": {
+       "content": "                 ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50272": {
+       "content": "                ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50273": {
+       "content": "               ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50274": {
+       "content": "              ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50275": {
+       "content": "             ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50276": {
+       "content": "            ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50277": {
+       "content": "           ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50278": {
+       "content": "          ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50279": {
+       "content": "         ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50280": {
+       "content": "        ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50281": {
+       "content": "       ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50282": {
+       "content": "      ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50283": {
+       "content": "     ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50284": {
+       "content": "    ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50285": {
+       "content": "   ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50286": {
+       "content": "  ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50287": {
+       "content": "\t\t\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50288": {
+       "content": "\t\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50289": {
+       "content": "\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50290": {
+       "content": "\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50291": {
+       "content": "\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50292": {
+       "content": "\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50293": {
+       "content": "\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50294": {
+       "content": "\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CodeGenTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
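
The `chat_template` field above is the Zephyr-style three-role template (`<|system|>`, `<|user|>`, `<|assistant|>`, each turn closed with the EOS token). A minimal sketch of rendering a prompt with it, reusing the assumed repo id from earlier:

```python
# Sketch: render a conversation through the chat_template above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("BraylonDash/phi-2-gpo-test-longest-iter-v1-2")  # assumed repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # roughly: <|system|>\n...<|endoftext|>\n<|user|>\n...<|endoftext|>\n<|assistant|>
```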
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.97,
+   "train_loss": 0.0010954971686230911,
+   "train_runtime": 8408.6931,
+   "train_samples": 61135,
+   "train_samples_per_second": 0.476,
+   "train_steps_per_second": 0.029
+ }
trainer_state.json ADDED
@@ -0,0 +1,412 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.968,
+   "eval_steps": 100,
+   "global_step": 248,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.02,
+       "learning_rate": 2.0000000000000002e-07,
+       "logits/chosen": 0.10287265479564667,
+       "logits/rejected": 0.011988319456577301,
+       "logps/chosen": -192.40402221679688,
+       "logps/rejected": -100.80304718017578,
+       "loss": 0.001,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 2.0000000000000003e-06,
+       "logits/chosen": 0.16615836322307587,
+       "logits/rejected": 0.08426308631896973,
+       "logps/chosen": -182.21653747558594,
+       "logps/rejected": -161.99514770507812,
+       "loss": 0.001,
+       "rewards/accuracies": 0.375,
+       "rewards/chosen": -0.0011453586630523205,
+       "rewards/margins": 0.0010826066136360168,
+       "rewards/rejected": -0.0022279650438576937,
+       "step": 10
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 4.000000000000001e-06,
+       "logits/chosen": 0.22168031334877014,
+       "logits/rejected": 0.2861151099205017,
+       "logps/chosen": -176.96937561035156,
+       "logps/rejected": -135.0897674560547,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.36250001192092896,
+       "rewards/chosen": -0.0021877787075936794,
+       "rewards/margins": -0.000618638179730624,
+       "rewards/rejected": -0.0015691407024860382,
+       "step": 20
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 4.993800445762451e-06,
+       "logits/chosen": 0.2504645884037018,
+       "logits/rejected": 0.2488332986831665,
+       "logps/chosen": -169.92291259765625,
+       "logps/rejected": -150.56483459472656,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.39375001192092896,
+       "rewards/chosen": -0.0013141350354999304,
+       "rewards/margins": -0.00041974737541750073,
+       "rewards/rejected": -0.0008943876018747687,
+       "step": 30
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 4.944388344834205e-06,
+       "logits/chosen": 0.20393869280815125,
+       "logits/rejected": 0.26991668343544006,
+       "logps/chosen": -172.50244140625,
+       "logps/rejected": -155.56478881835938,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.40625,
+       "rewards/chosen": 0.0012658500345423818,
+       "rewards/margins": -0.0002958738768938929,
+       "rewards/rejected": 0.0015617238823324442,
+       "step": 40
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 4.8465431931347904e-06,
+       "logits/chosen": 0.23398549854755402,
+       "logits/rejected": 0.17997679114341736,
+       "logps/chosen": -198.8681182861328,
+       "logps/rejected": -161.630615234375,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.4375,
+       "rewards/chosen": -0.0012974137207493186,
+       "rewards/margins": 0.0008641968597657979,
+       "rewards/rejected": -0.0021616104058921337,
+       "step": 50
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 4.702203692102539e-06,
+       "logits/chosen": 0.24316315352916718,
+       "logits/rejected": 0.29181909561157227,
+       "logps/chosen": -187.0422821044922,
+       "logps/rejected": -162.91946411132812,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.3687500059604645,
+       "rewards/chosen": 3.171586286043748e-05,
+       "rewards/margins": -0.0004161152464803308,
+       "rewards/rejected": 0.00044783117482438684,
+       "step": 60
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 4.514229781074239e-06,
+       "logits/chosen": 0.23100349307060242,
+       "logits/rejected": 0.16395993530750275,
+       "logps/chosen": -195.23422241210938,
+       "logps/rejected": -167.4615020751953,
+       "loss": 0.001,
+       "rewards/accuracies": 0.4000000059604645,
+       "rewards/chosen": 4.4724576582666487e-05,
+       "rewards/margins": 0.0011552829528227448,
+       "rewards/rejected": -0.0011105581652373075,
+       "step": 70
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 4.286345970517195e-06,
+       "logits/chosen": 0.1470017284154892,
+       "logits/rejected": 0.1968422830104828,
+       "logps/chosen": -202.30715942382812,
+       "logps/rejected": -162.376953125,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.375,
+       "rewards/chosen": 0.0012948224321007729,
+       "rewards/margins": 0.0006303158006630838,
+       "rewards/rejected": 0.00066450668964535,
+       "step": 80
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 4.023067544670082e-06,
+       "logits/chosen": 0.2470308095216751,
+       "logits/rejected": 0.2212987244129181,
+       "logps/chosen": -173.95779418945312,
+       "logps/rejected": -155.6954803466797,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.3812499940395355,
+       "rewards/chosen": -0.0012559869792312384,
+       "rewards/margins": -0.000755336950533092,
+       "rewards/rejected": -0.0005006499122828245,
+       "step": 90
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 3.7296110958116845e-06,
+       "logits/chosen": 0.19465205073356628,
+       "logits/rejected": 0.17263346910476685,
+       "logps/chosen": -177.3841552734375,
+       "logps/rejected": -143.33177185058594,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.3812499940395355,
+       "rewards/chosen": -0.0003903825709130615,
+       "rewards/margins": 8.392091694986448e-05,
+       "rewards/rejected": -0.0004743034369312227,
+       "step": 100
+     },
+     {
+       "epoch": 1.6,
+       "eval_logits/chosen": -0.007278905715793371,
+       "eval_logits/rejected": 0.09087153524160385,
+       "eval_logps/chosen": -306.3416748046875,
+       "eval_logps/rejected": -278.65435791015625,
+       "eval_loss": 0.0015715558547526598,
+       "eval_rewards/accuracies": 0.4959999918937683,
+       "eval_rewards/chosen": -0.0007231617928482592,
+       "eval_rewards/margins": 0.00012325971329119056,
+       "eval_rewards/rejected": -0.0008464214624837041,
+       "eval_runtime": 933.3559,
+       "eval_samples_per_second": 2.143,
+       "eval_steps_per_second": 0.536,
+       "step": 100
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 3.4117911628292944e-06,
+       "logits/chosen": 0.29057276248931885,
+       "logits/rejected": 0.19747909903526306,
+       "logps/chosen": -173.1153564453125,
+       "logps/rejected": -151.88233947753906,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.3187499940395355,
+       "rewards/chosen": -0.00022493198048323393,
+       "rewards/margins": -0.000448818871518597,
+       "rewards/rejected": 0.00022388689103536308,
+       "step": 110
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 3.075905022087675e-06,
+       "logits/chosen": 0.1814224272966385,
+       "logits/rejected": 0.2010256052017212,
+       "logps/chosen": -169.48214721679688,
+       "logps/rejected": -147.04067993164062,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.41874998807907104,
+       "rewards/chosen": 0.0006150969420559704,
+       "rewards/margins": 0.0005108726327307522,
+       "rewards/rejected": 0.00010422446939628571,
+       "step": 120
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 2.728607913349464e-06,
+       "logits/chosen": 0.24528518319129944,
+       "logits/rejected": 0.2507859468460083,
+       "logps/chosen": -177.7463836669922,
+       "logps/rejected": -162.4386444091797,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.38749998807907104,
+       "rewards/chosen": -0.0017223177710548043,
+       "rewards/margins": 0.0007541574887000024,
+       "rewards/rejected": -0.0024764754343777895,
+       "step": 130
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 2.376781173017589e-06,
+       "logits/chosen": 0.1746383160352707,
+       "logits/rejected": 0.15819591283798218,
+       "logps/chosen": -175.5349884033203,
+       "logps/rejected": -150.61404418945312,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.3812499940395355,
+       "rewards/chosen": -0.0010503135854378343,
+       "rewards/margins": -0.0006477275164797902,
+       "rewards/rejected": -0.0004025862435810268,
+       "step": 140
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 2.0273958875043877e-06,
+       "logits/chosen": 0.16554135084152222,
+       "logits/rejected": 0.20675285160541534,
+       "logps/chosen": -198.78173828125,
+       "logps/rejected": -161.86558532714844,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.3812499940395355,
+       "rewards/chosen": -0.001666503376327455,
+       "rewards/margins": 0.0006793343345634639,
+       "rewards/rejected": -0.0023458378855139017,
+       "step": 150
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 1.6873747682962393e-06,
+       "logits/chosen": 0.24000254273414612,
+       "logits/rejected": 0.27995795011520386,
+       "logps/chosen": -190.5437469482422,
+       "logps/rejected": -164.19549560546875,
+       "loss": 0.001,
+       "rewards/accuracies": 0.40625,
+       "rewards/chosen": 0.002512627048417926,
+       "rewards/margins": 0.00116717757191509,
+       "rewards/rejected": 0.001345449360087514,
+       "step": 160
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 1.363454985517803e-06,
+       "logits/chosen": 0.2393382042646408,
+       "logits/rejected": 0.26565679907798767,
+       "logps/chosen": -164.2212677001953,
+       "logps/rejected": -140.7093963623047,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.4312500059604645,
+       "rewards/chosen": 0.0010435387957841158,
+       "rewards/margins": 0.0007807637448422611,
+       "rewards/rejected": 0.0002627749345265329,
+       "step": 170
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 1.062054677808238e-06,
+       "logits/chosen": 0.20416012406349182,
+       "logits/rejected": 0.1511019915342331,
+       "logps/chosen": -183.35943603515625,
+       "logps/rejected": -157.43435668945312,
+       "loss": 0.001,
+       "rewards/accuracies": 0.4124999940395355,
+       "rewards/chosen": -7.58793466957286e-05,
+       "rewards/margins": 0.0020282561890780926,
+       "rewards/rejected": -0.002104135463014245,
+       "step": 180
+     },
+     {
+       "epoch": 3.04,
+       "learning_rate": 7.891457834794711e-07,
+       "logits/chosen": 0.19763195514678955,
+       "logits/rejected": 0.2482624500989914,
+       "logps/chosen": -169.32882690429688,
+       "logps/rejected": -142.48541259765625,
+       "loss": 0.001,
+       "rewards/accuracies": 0.44999998807907104,
+       "rewards/chosen": 0.0012013750383630395,
+       "rewards/margins": 0.0014427897986024618,
+       "rewards/rejected": -0.00024141438188962638,
+       "step": 190
+     },
+     {
+       "epoch": 3.2,
+       "learning_rate": 5.501357126768117e-07,
+       "logits/chosen": 0.1878470778465271,
+       "logits/rejected": 0.18784348666667938,
+       "logps/chosen": -188.822998046875,
+       "logps/rejected": -153.36880493164062,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.45625001192092896,
+       "rewards/chosen": 0.00035641956492327154,
+       "rewards/margins": 0.0015287164133042097,
+       "rewards/rejected": -0.001172296586446464,
+       "step": 200
+     },
+     {
+       "epoch": 3.2,
+       "eval_logits/chosen": -0.004656919743865728,
+       "eval_logits/rejected": 0.09210014343261719,
+       "eval_logps/chosen": -306.283447265625,
+       "eval_logps/rejected": -278.6177062988281,
+       "eval_loss": 0.0015858013648539782,
+       "eval_rewards/accuracies": 0.4925000071525574,
+       "eval_rewards/chosen": -0.00014072553312871605,
+       "eval_rewards/margins": 0.00033963273745030165,
+       "eval_rewards/rejected": -0.0004803583142347634,
+       "eval_runtime": 1595.4244,
+       "eval_samples_per_second": 1.254,
+       "eval_steps_per_second": 0.313,
+       "step": 200
+     },
+     {
+       "epoch": 3.36,
+       "learning_rate": 3.4976020508682345e-07,
+       "logits/chosen": 0.17026616632938385,
+       "logits/rejected": 0.2095976322889328,
+       "logps/chosen": -177.265625,
+       "logps/rejected": -149.7288055419922,
+       "loss": 0.001,
+       "rewards/accuracies": 0.48750001192092896,
+       "rewards/chosen": 0.00014497071970254183,
+       "rewards/margins": 0.0017732717096805573,
+       "rewards/rejected": -0.0016283008735626936,
+       "step": 210
+     },
+     {
+       "epoch": 3.52,
+       "learning_rate": 1.9198949610721273e-07,
+       "logits/chosen": 0.19490325450897217,
+       "logits/rejected": 0.2224169224500656,
+       "logps/chosen": -175.52345275878906,
+       "logps/rejected": -146.68728637695312,
+       "loss": 0.001,
+       "rewards/accuracies": 0.45625001192092896,
+       "rewards/chosen": 0.002361242426559329,
+       "rewards/margins": 0.0017604168970137835,
+       "rewards/rejected": 0.0006008257623761892,
+       "step": 220
+     },
+     {
+       "epoch": 3.68,
+       "learning_rate": 7.994965069994143e-08,
+       "logits/chosen": 0.21312932670116425,
+       "logits/rejected": 0.18425947427749634,
+       "logps/chosen": -187.47511291503906,
+       "logps/rejected": -163.0996856689453,
+       "loss": 0.001,
+       "rewards/accuracies": 0.46875,
+       "rewards/chosen": 0.0017060479149222374,
+       "rewards/margins": 0.001837022602558136,
+       "rewards/rejected": -0.00013097473129164428,
+       "step": 230
+     },
+     {
+       "epoch": 3.84,
+       "learning_rate": 1.5860623616664183e-08,
+       "logits/chosen": 0.1936773955821991,
+       "logits/rejected": 0.12241797149181366,
+       "logps/chosen": -176.46658325195312,
+       "logps/rejected": -148.58331298828125,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.39375001192092896,
+       "rewards/chosen": -0.0003967673401348293,
+       "rewards/margins": 0.00025122734950855374,
+       "rewards/rejected": -0.0006479948060587049,
+       "step": 240
+     },
+     {
+       "epoch": 3.97,
+       "step": 248,
+       "total_flos": 0.0,
+       "train_loss": 0.0010954971686230911,
+       "train_runtime": 8408.6931,
+       "train_samples_per_second": 0.476,
+       "train_steps_per_second": 0.029
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 248,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 100,
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99ba7198be8c2605f012ab450f983e794a29cc5593bde9363a80c0173afa0ade
+ size 5880
vocab.json ADDED
The diff for this file is too large to render. See raw diff