BraylonDash committed on
Commit cdfeb6b
1 Parent(s): 730f29b

Model save

README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: microsoft/phi-2
+ model-index:
+ - name: phi-2-gpo-test-longest-iter-random2-3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # phi-2-gpo-test-longest-iter-random2-3
+
+ This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0018
+ - Rewards/chosen: -0.0061
+ - Rewards/rejected: -0.0056
+ - Rewards/accuracies: 0.5015
+ - Rewards/margins: -0.0005
+ - Logps/rejected: -279.5969
+ - Logps/chosen: -307.4853
+ - Logits/rejected: 0.0326
+ - Logits/chosen: -0.0650
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 4
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+ | 0.0011        | 1.6   | 100  | 0.0018          | -0.0031        | -0.0021          | 0.4800             | -0.0009         | -279.2489      | -307.1817    | 0.0509          | -0.0480       |
+ | 0.001         | 3.2   | 200  | 0.0019          | -0.0055        | -0.0043          | 0.4765             | -0.0012         | -279.4667      | -307.4276    | 0.0323          | -0.0664       |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
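The hyperparameters above map onto a `trl`-style DPO run. Below is a rough, hedged sketch, not the author's script: the card does not name the dataset, though `"train_samples": 61135` in all_results.json further down matches the size of HuggingFaceH4/ultrafeedback_binarized's `train_prefs` split, so that dataset is used here as a labeled guess; `beta` is likewise an assumption.

```python
# Sketch of a DPO run matching the hyperparameters above (trl ~0.7-era APIs).
# The dataset choice and beta are assumptions; see the note in the lead-in.
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
tokenizer.pad_token = tokenizer.eos_token  # phi-2 ships no dedicated pad token

peft_config = LoraConfig(  # mirrors adapter_config.json below
    r=32, lora_alpha=16, lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "dense"],
    task_type="CAUSAL_LM",
)

# Assumption: not confirmed by the card. The raw splits also need mapping to
# plain prompt/chosen/rejected strings before DPOTrainer can consume them.
ds = load_dataset("HuggingFaceH4/ultrafeedback_binarized")

args = TrainingArguments(
    output_dir="phi-2-gpo-test-longest-iter-random2-3",
    learning_rate=5e-6,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,  # 4 x 4 = total_train_batch_size 16
    num_train_epochs=4,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    logging_steps=10,
    evaluation_strategy="steps",
    eval_steps=100,
    save_steps=100,
    bf16=True,
)

trainer = DPOTrainer(
    model,
    ref_model=None,          # with a PEFT adapter, the frozen base serves as the reference
    args=args,
    beta=0.1,                # assumption: the card does not record beta
    train_dataset=ds["train_prefs"],
    eval_dataset=ds["test_prefs"],
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```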
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/phi-2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "v_proj",
+     "q_proj",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
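To run the adapter this config describes, load it on top of the base model with `peft`. A minimal inference sketch; the repo id below is inferred from the committer and model name, so treat it as an assumption:

```python
# Minimal inference sketch; the repo id is an assumption.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

repo = "BraylonDash/phi-2-gpo-test-longest-iter-random2-3"  # assumed repo id
model = AutoPeftModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(repo)

inputs = tokenizer(
    "Instruct: What is direct preference optimization?\nOutput:",
    return_tensors="pt",
)
out = model.generate(**inputs, max_new_tokens=100, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```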
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92cb91531d8dc6607646bb40a3c38bad55be034d177db6e3426febaaec897d7a
+ size 41977616
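The 41,977,616-byte pointer size is consistent with the LoRA configuration above, assuming phi-2's published shapes (hidden size 2560, 32 decoder layers, all four target projections square) and 16-bit storage; a back-of-the-envelope check:

```python
# Back-of-the-envelope check of the adapter file size. Shapes are assumptions
# based on phi-2's published architecture (hidden size 2560, 32 layers).
hidden, layers, rank, n_targets = 2560, 32, 32, 4  # q_proj, k_proj, v_proj, dense
params = layers * n_targets * rank * (hidden + hidden)  # lora_A + lora_B per module
print(params)      # 20971520 trainable LoRA parameters
print(params * 2)  # 41943040 bytes at 2 bytes/param -- the remaining ~34 KB of
                   # the 41,977,616-byte file is the safetensors header/metadata.
```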
added_tokens.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "\t\t": 50294,
+   "\t\t\t": 50293,
+   "\t\t\t\t": 50292,
+   "\t\t\t\t\t": 50291,
+   "\t\t\t\t\t\t": 50290,
+   "\t\t\t\t\t\t\t": 50289,
+   "\t\t\t\t\t\t\t\t": 50288,
+   "\t\t\t\t\t\t\t\t\t": 50287,
+   "  ": 50286,
+   "   ": 50285,
+   "    ": 50284,
+   "     ": 50283,
+   "      ": 50282,
+   "       ": 50281,
+   "        ": 50280,
+   "         ": 50279,
+   "          ": 50278,
+   "           ": 50277,
+   "            ": 50276,
+   "             ": 50275,
+   "              ": 50274,
+   "               ": 50273,
+   "                ": 50272,
+   "                 ": 50271,
+   "                  ": 50270,
+   "                   ": 50269,
+   "                    ": 50268,
+   "                     ": 50267,
+   "                      ": 50266,
+   "                       ": 50265,
+   "                        ": 50264,
+   "                         ": 50263,
+   "                          ": 50262,
+   "                           ": 50261,
+   "                            ": 50260,
+   "                             ": 50259,
+   "                              ": 50258,
+   "                               ": 50257
+ }
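These are the CodeGen-family whitespace tokens: runs of 2-9 tabs and 2-31 spaces each get a single id, so code indentation compresses to one token. The space runs above were collapsed in rendering and are reconstructed here from the evident pattern (a run of n spaces maps to id 50288 - n, mirroring the tab entries, where a run of n tabs maps to 50296 - n). A quick check, as a sketch:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/phi-2")
print(tok.convert_tokens_to_ids("\t\t"))   # 50294: a 2-tab run is a single token
print(tok.convert_tokens_to_ids(" " * 8))  # 50280, i.e. 50288 - 8
```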
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.06497720628976822,
+   "eval_logits/rejected": 0.03257768973708153,
+   "eval_logps/chosen": -307.4852600097656,
+   "eval_logps/rejected": -279.59686279296875,
+   "eval_loss": 0.001842426834627986,
+   "eval_rewards/accuracies": 0.5015000104904175,
+   "eval_rewards/chosen": -0.006085487548261881,
+   "eval_rewards/margins": -0.0004582978435792029,
+   "eval_rewards/rejected": -0.005627189297229052,
+   "eval_runtime": 412.5036,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 4.848,
+   "eval_steps_per_second": 1.212,
+   "train_loss": 0.0010713144931228712,
+   "train_runtime": 2635.242,
+   "train_samples": 61135,
+   "train_samples_per_second": 1.518,
+   "train_steps_per_second": 0.094
+ }
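As a consistency check, the DPO margin is the mean of (chosen reward - rejected reward) per pair, so recomputing it from the two summary means should, and does, agree up to batch-averaging noise:

```python
chosen, rejected = -0.006085487548261881, -0.005627189297229052
print(chosen - rejected)  # -0.00045829825... vs. the logged eval_rewards/margins
                          # of -0.00045829784...: equal to within ~4e-10.
```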
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 3.97,
+   "eval_logits/chosen": -0.06497720628976822,
+   "eval_logits/rejected": 0.03257768973708153,
+   "eval_logps/chosen": -307.4852600097656,
+   "eval_logps/rejected": -279.59686279296875,
+   "eval_loss": 0.001842426834627986,
+   "eval_rewards/accuracies": 0.5015000104904175,
+   "eval_rewards/chosen": -0.006085487548261881,
+   "eval_rewards/margins": -0.0004582978435792029,
+   "eval_rewards/rejected": -0.005627189297229052,
+   "eval_runtime": 412.5036,
+   "eval_samples": 2000,
+   "eval_samples_per_second": 4.848,
+   "eval_steps_per_second": 1.212
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
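All four special tokens resolve to the single GPT-2-family token `<|endoftext|>` (id 50256); in particular, reusing eos as the pad token is what makes batched training work even though phi-2 defines no dedicated padding token. A quick check, as a sketch (repo id assumed as before):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("BraylonDash/phi-2-gpo-test-longest-iter-random2-3")
assert tok.bos_token == tok.eos_token == tok.pad_token == tok.unk_token == "<|endoftext|>"
print(tok.eos_token_id)  # 50256
```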
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50257": {
+       "content": "                               ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50258": {
+       "content": "                              ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50259": {
+       "content": "                             ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50260": {
+       "content": "                            ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50261": {
+       "content": "                           ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50262": {
+       "content": "                          ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50263": {
+       "content": "                         ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50264": {
+       "content": "                        ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50265": {
+       "content": "                       ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50266": {
+       "content": "                      ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50267": {
+       "content": "                     ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50268": {
+       "content": "                    ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50269": {
+       "content": "                   ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50270": {
+       "content": "                  ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50271": {
+       "content": "                 ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50272": {
+       "content": "                ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50273": {
+       "content": "               ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50274": {
+       "content": "              ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50275": {
+       "content": "             ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50276": {
+       "content": "            ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50277": {
+       "content": "           ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50278": {
+       "content": "          ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50279": {
+       "content": "         ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50280": {
+       "content": "        ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50281": {
+       "content": "       ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50282": {
+       "content": "      ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50283": {
+       "content": "     ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50284": {
+       "content": "    ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50285": {
+       "content": "   ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50286": {
+       "content": "  ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50287": {
+       "content": "\t\t\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50288": {
+       "content": "\t\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50289": {
+       "content": "\t\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50290": {
+       "content": "\t\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50291": {
+       "content": "\t\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50292": {
+       "content": "\t\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50293": {
+       "content": "\t\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50294": {
+       "content": "\t\t",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CodeGenTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
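The `chat_template` is the Zephyr-style `<|system|>` / `<|user|>` / `<|assistant|>` format with `<|endoftext|>` as the turn terminator. A sketch of rendering it via `apply_chat_template`, which the pinned Transformers 4.36.2 supports (repo id assumed as before):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("BraylonDash/phi-2-gpo-test-longest-iter-random2-3")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain DPO in one sentence."},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Renders roughly as:
# <|system|>
# You are a helpful assistant.<|endoftext|>
# <|user|>
# Explain DPO in one sentence.<|endoftext|>
# <|assistant|>
```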
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.97,
+   "train_loss": 0.0010713144931228712,
+   "train_runtime": 2635.242,
+   "train_samples": 61135,
+   "train_samples_per_second": 1.518,
+   "train_steps_per_second": 0.094
+ }
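Two quick sanity checks on these figures: `train_steps_per_second` matches `global_step / train_runtime` from trainer_state.json below, and backing the per-epoch sample count out of the final epoch value suggests only about 1,000 examples were actually trained on per epoch, despite `"train_samples": 61135` (which likely reports the raw dataset size before any subsetting):

```python
runtime, steps, eff_batch, epoch = 2635.242, 248, 16, 3.968  # from trainer_state.json
print(steps / runtime)            # 0.0941... -> logged as 0.094
print(steps * eff_batch / epoch)  # 1000.0 -> ~1,000 training examples per epoch
```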
trainer_state.json ADDED
@@ -0,0 +1,412 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.968,
+   "eval_steps": 100,
+   "global_step": 248,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.02,
+       "learning_rate": 2.0000000000000002e-07,
+       "logits/chosen": -0.026431415230035782,
+       "logits/rejected": -0.16986289620399475,
+       "logps/chosen": -151.98907470703125,
+       "logps/rejected": -93.52606964111328,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 2.0000000000000003e-06,
+       "logits/chosen": -0.147756889462471,
+       "logits/rejected": 0.020012276247143745,
+       "logps/chosen": -136.29202270507812,
+       "logps/rejected": -75.50608825683594,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.4027777910232544,
+       "rewards/chosen": -0.0007271924405358732,
+       "rewards/margins": -0.0012592646526172757,
+       "rewards/rejected": 0.0005320722702890635,
+       "step": 10
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 4.000000000000001e-06,
+       "logits/chosen": -0.055388081818819046,
+       "logits/rejected": 0.15466341376304626,
+       "logps/chosen": -146.0823974609375,
+       "logps/rejected": -90.69671630859375,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.44999998807907104,
+       "rewards/chosen": -3.685036062961444e-05,
+       "rewards/margins": -0.0004525856929831207,
+       "rewards/rejected": 0.00041573523776605725,
+       "step": 20
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 4.993800445762451e-06,
+       "logits/chosen": -0.2320336550474167,
+       "logits/rejected": 0.11852385103702545,
+       "logps/chosen": -137.83750915527344,
+       "logps/rejected": -78.5675048828125,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": 0.0010912430007010698,
+       "rewards/margins": 0.001036056550219655,
+       "rewards/rejected": 5.518653051694855e-05,
+       "step": 30
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 4.944388344834205e-06,
+       "logits/chosen": -0.051973842084407806,
+       "logits/rejected": 0.04229766130447388,
+       "logps/chosen": -142.12911987304688,
+       "logps/rejected": -96.51297760009766,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": 0.0018935591215267777,
+       "rewards/margins": 0.0001399043685523793,
+       "rewards/rejected": 0.001753654913045466,
+       "step": 40
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 4.8465431931347904e-06,
+       "logits/chosen": -0.16981413960456848,
+       "logits/rejected": 0.09270621091127396,
+       "logps/chosen": -133.02362060546875,
+       "logps/rejected": -90.3238525390625,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.4375,
+       "rewards/chosen": 0.002572892000898719,
+       "rewards/margins": 0.0004979773075319827,
+       "rewards/rejected": 0.002074914751574397,
+       "step": 50
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 4.702203692102539e-06,
+       "logits/chosen": -0.10357820987701416,
+       "logits/rejected": 0.13292734324932098,
+       "logps/chosen": -143.26065063476562,
+       "logps/rejected": -81.9512710571289,
+       "loss": 0.0013,
+       "rewards/accuracies": 0.5,
+       "rewards/chosen": 0.0026912898756563663,
+       "rewards/margins": 0.0002925347362179309,
+       "rewards/rejected": 0.0023987549357116222,
+       "step": 60
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 4.514229781074239e-06,
+       "logits/chosen": -0.08756460249423981,
+       "logits/rejected": -0.010441536083817482,
+       "logps/chosen": -155.8980255126953,
+       "logps/rejected": -105.80094146728516,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.543749988079071,
+       "rewards/chosen": 0.0051867421716451645,
+       "rewards/margins": 0.003287202911451459,
+       "rewards/rejected": 0.00189953891094774,
+       "step": 70
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 4.286345970517195e-06,
+       "logits/chosen": -0.11689458042383194,
+       "logits/rejected": 0.07064902782440186,
+       "logps/chosen": -124.151123046875,
+       "logps/rejected": -64.7757339477539,
+       "loss": 0.001,
+       "rewards/accuracies": 0.5562499761581421,
+       "rewards/chosen": 0.005000757984817028,
+       "rewards/margins": 0.0033935843966901302,
+       "rewards/rejected": 0.0016071733552962542,
+       "step": 80
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 4.023067544670082e-06,
+       "logits/chosen": -0.1919364184141159,
+       "logits/rejected": 0.19178633391857147,
+       "logps/chosen": -134.85496520996094,
+       "logps/rejected": -79.26893615722656,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.543749988079071,
+       "rewards/chosen": 0.005402157548815012,
+       "rewards/margins": 0.0031424053013324738,
+       "rewards/rejected": 0.0022597520146518946,
+       "step": 90
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 3.7296110958116845e-06,
+       "logits/chosen": -0.21874800324440002,
+       "logits/rejected": 0.08748228847980499,
+       "logps/chosen": -143.6033935546875,
+       "logps/rejected": -93.09342956542969,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": 0.0063162692822515965,
+       "rewards/margins": 0.0023192143999040127,
+       "rewards/rejected": 0.003997053951025009,
+       "step": 100
+     },
+     {
+       "epoch": 1.6,
+       "eval_logits/chosen": -0.0479925237596035,
+       "eval_logits/rejected": 0.05092725157737732,
+       "eval_logps/chosen": -307.1817321777344,
+       "eval_logps/rejected": -279.2488708496094,
+       "eval_loss": 0.0018035146640613675,
+       "eval_rewards/accuracies": 0.47999998927116394,
+       "eval_rewards/chosen": -0.003050154075026512,
+       "eval_rewards/margins": -0.0009030703222379088,
+       "eval_rewards/rejected": -0.0021470836363732815,
+       "eval_runtime": 412.557,
+       "eval_samples_per_second": 4.848,
+       "eval_steps_per_second": 1.212,
+       "step": 100
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 3.4117911628292944e-06,
+       "logits/chosen": -0.05666325241327286,
+       "logits/rejected": 0.15881529450416565,
+       "logps/chosen": -156.90457153320312,
+       "logps/rejected": -89.17283630371094,
+       "loss": 0.001,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": 0.009410916827619076,
+       "rewards/margins": 0.005939081776887178,
+       "rewards/rejected": 0.0034718364477157593,
+       "step": 110
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 3.075905022087675e-06,
+       "logits/chosen": -0.07111762464046478,
+       "logits/rejected": 0.03149420768022537,
+       "logps/chosen": -154.131103515625,
+       "logps/rejected": -102.81583404541016,
+       "loss": 0.0012,
+       "rewards/accuracies": 0.5625,
+       "rewards/chosen": 0.008770602755248547,
+       "rewards/margins": 0.0032143122516572475,
+       "rewards/rejected": 0.005556290503591299,
+       "step": 120
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 2.728607913349464e-06,
+       "logits/chosen": -0.25201350450515747,
+       "logits/rejected": 0.08731786906719208,
+       "logps/chosen": -133.0045928955078,
+       "logps/rejected": -75.28916931152344,
+       "loss": 0.001,
+       "rewards/accuracies": 0.643750011920929,
+       "rewards/chosen": 0.010033163242042065,
+       "rewards/margins": 0.005545603111386299,
+       "rewards/rejected": 0.004487560596317053,
+       "step": 130
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 2.376781173017589e-06,
+       "logits/chosen": -0.23511283099651337,
+       "logits/rejected": 0.056128501892089844,
+       "logps/chosen": -139.34347534179688,
+       "logps/rejected": -83.03041076660156,
+       "loss": 0.0009,
+       "rewards/accuracies": 0.643750011920929,
+       "rewards/chosen": 0.011506117880344391,
+       "rewards/margins": 0.007246524095535278,
+       "rewards/rejected": 0.0042595937848091125,
+       "step": 140
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 2.0273958875043877e-06,
+       "logits/chosen": -0.15843239426612854,
+       "logits/rejected": -0.025659451261162758,
+       "logps/chosen": -128.82235717773438,
+       "logps/rejected": -71.82875061035156,
+       "loss": 0.001,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": 0.010396704077720642,
+       "rewards/margins": 0.006070839706808329,
+       "rewards/rejected": 0.004325864836573601,
+       "step": 150
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 1.6873747682962393e-06,
+       "logits/chosen": -0.10364966094493866,
+       "logits/rejected": 0.15611091256141663,
+       "logps/chosen": -146.84365844726562,
+       "logps/rejected": -94.91715240478516,
+       "loss": 0.0011,
+       "rewards/accuracies": 0.6312500238418579,
+       "rewards/chosen": 0.01151433028280735,
+       "rewards/margins": 0.005733857862651348,
+       "rewards/rejected": 0.005780472420156002,
+       "step": 160
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 1.363454985517803e-06,
+       "logits/chosen": -0.14020611345767975,
+       "logits/rejected": 0.0140873147174716,
+       "logps/chosen": -130.91256713867188,
+       "logps/rejected": -88.08763122558594,
+       "loss": 0.001,
+       "rewards/accuracies": 0.625,
+       "rewards/chosen": 0.01202808041125536,
+       "rewards/margins": 0.005936866160482168,
+       "rewards/rejected": 0.006091213319450617,
+       "step": 170
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 1.062054677808238e-06,
+       "logits/chosen": -0.07270533591508865,
+       "logits/rejected": 0.16701629757881165,
+       "logps/chosen": -149.4364471435547,
+       "logps/rejected": -83.87244415283203,
+       "loss": 0.001,
+       "rewards/accuracies": 0.6000000238418579,
+       "rewards/chosen": 0.011001331731677055,
+       "rewards/margins": 0.0055719343945384026,
+       "rewards/rejected": 0.00542939780279994,
+       "step": 180
+     },
+     {
+       "epoch": 3.04,
+       "learning_rate": 7.891457834794711e-07,
+       "logits/chosen": -0.13850273191928864,
+       "logits/rejected": 0.006671518087387085,
+       "logps/chosen": -151.9923553466797,
+       "logps/rejected": -94.79319763183594,
+       "loss": 0.001,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": 0.014899802394211292,
+       "rewards/margins": 0.006995724979788065,
+       "rewards/rejected": 0.00790407694876194,
+       "step": 190
+     },
+     {
+       "epoch": 3.2,
+       "learning_rate": 5.501357126768117e-07,
+       "logits/chosen": -0.14668866991996765,
+       "logits/rejected": 0.10788450390100479,
+       "logps/chosen": -140.62472534179688,
+       "logps/rejected": -88.3201904296875,
+       "loss": 0.001,
+       "rewards/accuracies": 0.6187499761581421,
+       "rewards/chosen": 0.011035704985260963,
+       "rewards/margins": 0.006984876934438944,
+       "rewards/rejected": 0.004050827585160732,
+       "step": 200
+     },
+     {
+       "epoch": 3.2,
+       "eval_logits/chosen": -0.0663725882768631,
+       "eval_logits/rejected": 0.03233730420470238,
+       "eval_logps/chosen": -307.4276123046875,
+       "eval_logps/rejected": -279.4666748046875,
+       "eval_loss": 0.0018860435811802745,
+       "eval_rewards/accuracies": 0.476500004529953,
+       "eval_rewards/chosen": -0.0055097793228924274,
+       "eval_rewards/margins": -0.001184401917271316,
+       "eval_rewards/rejected": -0.0043253772892057896,
+       "eval_runtime": 412.8262,
+       "eval_samples_per_second": 4.845,
+       "eval_steps_per_second": 1.211,
+       "step": 200
+     },
+     {
+       "epoch": 3.36,
+       "learning_rate": 3.4976020508682345e-07,
+       "logits/chosen": -0.2199704945087433,
+       "logits/rejected": 0.13789795339107513,
+       "logps/chosen": -151.80128479003906,
+       "logps/rejected": -87.3798599243164,
+       "loss": 0.001,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": 0.012573355808854103,
+       "rewards/margins": 0.006345916539430618,
+       "rewards/rejected": 0.00622743833810091,
+       "step": 210
+     },
+     {
+       "epoch": 3.52,
+       "learning_rate": 1.9198949610721273e-07,
+       "logits/chosen": -0.19111016392707825,
+       "logits/rejected": -0.011430763639509678,
+       "logps/chosen": -132.64901733398438,
+       "logps/rejected": -73.41009521484375,
+       "loss": 0.0009,
+       "rewards/accuracies": 0.612500011920929,
+       "rewards/chosen": 0.010676460340619087,
+       "rewards/margins": 0.006789586041122675,
+       "rewards/rejected": 0.0038868754636496305,
+       "step": 220
+     },
+     {
+       "epoch": 3.68,
+       "learning_rate": 7.994965069994143e-08,
+       "logits/chosen": -0.1486184000968933,
+       "logits/rejected": 0.17951056361198425,
+       "logps/chosen": -139.86245727539062,
+       "logps/rejected": -91.14982604980469,
+       "loss": 0.001,
+       "rewards/accuracies": 0.606249988079071,
+       "rewards/chosen": 0.012494201771914959,
+       "rewards/margins": 0.006681007333099842,
+       "rewards/rejected": 0.005813195835798979,
+       "step": 230
+     },
+     {
+       "epoch": 3.84,
+       "learning_rate": 1.5860623616664183e-08,
+       "logits/chosen": -0.13247700035572052,
+       "logits/rejected": -0.05245450884103775,
+       "logps/chosen": -140.35784912109375,
+       "logps/rejected": -86.07032012939453,
+       "loss": 0.0009,
+       "rewards/accuracies": 0.6625000238418579,
+       "rewards/chosen": 0.014865470118820667,
+       "rewards/margins": 0.008430338464677334,
+       "rewards/rejected": 0.006435131188482046,
+       "step": 240
+     },
+     {
+       "epoch": 3.97,
+       "step": 248,
+       "total_flos": 0.0,
+       "train_loss": 0.0010713144931228712,
+       "train_runtime": 2635.242,
+       "train_samples_per_second": 1.518,
+       "train_steps_per_second": 0.094
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 248,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 100,
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
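Since `log_history` is plain JSON, the training curves can be pulled straight out of this file; a plotting sketch:

```python
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step records carry "loss"; eval records carry "eval_*" keys instead.
logs = [e for e in state["log_history"] if "loss" in e]
plt.plot([e["step"] for e in logs], [e["rewards/margins"] for e in logs])
plt.xlabel("step")
plt.ylabel("rewards/margins")
plt.show()
```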
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea85d6fef05e01f4f8d99756caff2b842cbd1aebe2542a6e7141e495c1a29b8d
+ size 5880
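`training_args.bin` is a pickled `TrainingArguments` object, so any argument the model card omits can be recovered from it after downloading; a sketch (unpickle only files from sources you trust):

```python
import torch

args = torch.load("training_args.bin")  # unpickles a TrainingArguments object
print(args.learning_rate, args.lr_scheduler_type, args.warmup_ratio, args.seed)
```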
vocab.json ADDED
The diff for this file is too large to render. See raw diff