wzhouad committed
Commit 91852ce
1 Parent(s): 8c5d52c

Model save

README.md CHANGED
@@ -13,19 +13,10 @@ model-index:
13
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
  should probably proofread and complete it, then remove this comment. -->
15
 
 
16
  # zephyr-7b-dpo-full
17
 
18
  This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
19
- It achieves the following results on the evaluation set:
20
- - Loss: 0.0513
21
- - Rewards/chosen: -2.3210
22
- - Rewards/rejected: -3.0411
23
- - Rewards/accuracies: 0.7148
24
- - Rewards/margins: 0.7201
25
- - Logps/rejected: -561.4609
26
- - Logps/chosen: -489.1372
27
- - Logits/rejected: -2.1024
28
- - Logits/chosen: -2.1271
29
 
30
  ## Model description
31
 
@@ -47,11 +38,11 @@ The following hyperparameters were used during training:
47
  - learning_rate: 5e-07
48
  - train_batch_size: 8
49
  - eval_batch_size: 8
50
- - seed: 2
51
  - distributed_type: multi-GPU
52
  - num_devices: 8
53
- - gradient_accumulation_steps: 4
54
- - total_train_batch_size: 256
55
  - total_eval_batch_size: 64
56
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
57
  - lr_scheduler_type: cosine
@@ -60,17 +51,11 @@ The following hyperparameters were used during training:
60
 
61
  ### Training results
62
 
63
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
64
- |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
65
- | 0.0816 | 0.23 | 100 | 0.0900 | -1.3317 | -1.7519 | 0.6602 | 0.4202 | -432.5398 | -390.2077 | -2.3227 | -2.3412 |
66
- | 0.0436 | 0.45 | 200 | 0.0598 | -2.0588 | -2.6924 | 0.6914 | 0.6335 | -526.5885 | -462.9209 | -2.1205 | -2.1455 |
67
- | 0.0388 | 0.68 | 300 | 0.0515 | -2.3897 | -3.1201 | 0.6914 | 0.7304 | -569.3627 | -496.0135 | -2.1019 | -2.1254 |
68
- | 0.0379 | 0.91 | 400 | 0.0513 | -2.3210 | -3.0411 | 0.7148 | 0.7201 | -561.4609 | -489.1372 | -2.1024 | -2.1271 |
69
 
70
 
71
  ### Framework versions
72
 
73
- - Transformers 4.35.2
74
  - Pytorch 2.1.2+cu121
75
  - Datasets 2.14.6
76
- - Tokenizers 0.14.1
 
13
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
  should probably proofread and complete it, then remove this comment. -->
15
 
16
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/sanqiang/wdpo/runs/i9fm0xk3)
17
  # zephyr-7b-dpo-full
18
 
19
  This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
 
20
 
21
  ## Model description
22
 
 
38
  - learning_rate: 5e-07
39
  - train_batch_size: 8
40
  - eval_batch_size: 8
41
+ - seed: 42
42
  - distributed_type: multi-GPU
43
  - num_devices: 8
44
+ - gradient_accumulation_steps: 2
45
+ - total_train_batch_size: 128
46
  - total_eval_batch_size: 64
47
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
48
  - lr_scheduler_type: cosine
 
51
 
52
  ### Training results
53
 
54
 
55
 
56
  ### Framework versions
57
 
58
+ - Transformers 4.41.0.dev0
59
  - Pytorch 2.1.2+cu121
60
  - Datasets 2.14.6
61
+ - Tokenizers 0.19.1
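For readers comparing the two cards: the change from gradient_accumulation_steps 4 to 2 is what moves the effective batch size from 256 down to 128. A minimal sketch of that arithmetic, assuming the usual per-device × accumulation × devices formula (variable names here are illustrative, not the trainer's actual arguments):

```python
per_device_train_batch_size = 8
num_devices = 8

# Updated card: accumulation of 2 -> total_train_batch_size 128.
print(per_device_train_batch_size * 2 * num_devices)  # 128

# Previous card: accumulation of 4 -> total_train_batch_size 256.
print(per_device_train_batch_size * 4 * num_devices)  # 256
```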
all_results.json CHANGED
@@ -1,8 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.07976685402716369,
4
- "train_runtime": 7402.7266,
5
- "train_samples": 113028,
6
- "train_samples_per_second": 15.268,
7
- "train_steps_per_second": 0.06
 
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.20092295110225677,
5
+ "train_runtime": 384.8987,
6
+ "train_samples": 6750,
7
+ "train_samples_per_second": 17.537,
8
+ "train_steps_per_second": 0.138
9
  }
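The new all_results.json figures (repeated in train_results.json below) are mutually consistent with the updated hyperparameters. A quick sanity check, assuming the effective batch size of 128 from the README section above (variable names are illustrative):

```python
import math

train_samples = 6750
train_runtime = 384.8987        # seconds
total_train_batch_size = 128    # from the README hyperparameters above

steps = math.ceil(train_samples / total_train_batch_size)
print(steps)                                    # 53, matching global_step/max_steps below
print(round(train_samples / train_runtime, 3))  # 17.537 -> train_samples_per_second
print(round(steps / train_runtime, 3))          # 0.138  -> train_steps_per_second
```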
config.json CHANGED
@@ -3,6 +3,7 @@
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
 
6
  "bos_token_id": 1,
7
  "eos_token_id": 2,
8
  "hidden_act": "silu",
@@ -19,7 +20,7 @@
19
  "sliding_window": 4096,
20
  "tie_word_embeddings": false,
21
  "torch_dtype": "bfloat16",
22
- "transformers_version": "4.35.2",
23
  "use_cache": false,
24
  "vocab_size": 32000
25
  }
 
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
6
+ "attention_dropout": 0.0,
7
  "bos_token_id": 1,
8
  "eos_token_id": 2,
9
  "hidden_act": "silu",
 
20
  "sliding_window": 4096,
21
  "tie_word_embeddings": false,
22
  "torch_dtype": "bfloat16",
23
+ "transformers_version": "4.41.0.dev0",
24
  "use_cache": false,
25
  "vocab_size": 32000
26
  }
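config.json still describes a MistralForCausalLM checkpoint stored in bfloat16, now serialized with transformers 4.41.0.dev0 and sharded across the three safetensors files below. A minimal loading sketch; the repo id is a placeholder, not something this diff confirms:

```python
import torch
from transformers import AutoModelForCausalLM

# Placeholder repo id / local path; substitute the actual checkpoint location.
model = AutoModelForCausalLM.from_pretrained(
    "path/to/zephyr-7b-dpo-full",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)
model.eval()
```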
generation_config.json CHANGED
@@ -2,5 +2,5 @@
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
- "transformers_version": "4.35.2"
6
  }
 
2
  "_from_model_config": true,
3
  "bos_token_id": 1,
4
  "eos_token_id": 2,
5
+ "transformers_version": "4.41.0.dev0"
6
  }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e59e1d706e776f4bb6112dc215f13d6a43b67453b5a859c61bce0f8762f42eeb
3
  size 4943162336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a46cc3b5f84d4d99bb5b73eed876f0b5578ddd4737df15a4e469e33c95c08170
3
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c2b30f9389d19c5e0d1de28a31da254c3562caa7388541f928b2398860cd4ef6
3
  size 4999819336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cde9eb125e0d5a3b3305ef0cdb4bea397e66b0d0622e3a57126bb6a5687634fa
3
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a7668ab91df9a474ad277cfe790a8d47594470c8cb376e6652299336892c440
3
  size 4540516344
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c0430eff547cb6ba5d3819817467987ed432cfa8a3a5fcd748e54a73cb5ddc4
3
  size 4540516344
tokenizer.json CHANGED
@@ -134,6 +134,7 @@
134
  "end_of_word_suffix": null,
135
  "fuse_unk": true,
136
  "byte_fallback": true,
 
137
  "vocab": {
138
  "<unk>": 0,
139
  "<s>": 1,
 
134
  "end_of_word_suffix": null,
135
  "fuse_unk": true,
136
  "byte_fallback": true,
137
+ "ignore_merges": false,
138
  "vocab": {
139
  "<unk>": 0,
140
  "<s>": 1,
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
1
  {
 
 
2
  "added_tokens_decoder": {
3
  "0": {
4
  "content": "<unk>",
@@ -34,7 +36,6 @@
34
  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
35
  "clean_up_tokenization_spaces": false,
36
  "eos_token": "</s>",
37
- "legacy": true,
38
  "model_max_length": 2048,
39
  "pad_token": "</s>",
40
  "sp_model_kwargs": {},
 
1
  {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
  "added_tokens_decoder": {
5
  "0": {
6
  "content": "<unk>",
 
36
  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
37
  "clean_up_tokenization_spaces": false,
38
  "eos_token": "</s>",
 
39
  "model_max_length": 2048,
40
  "pad_token": "</s>",
41
  "sp_model_kwargs": {},
train_results.json CHANGED
@@ -1,8 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.07976685402716369,
4
- "train_runtime": 7402.7266,
5
- "train_samples": 113028,
6
- "train_samples_per_second": 15.268,
7
- "train_steps_per_second": 0.06
 
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.20092295110225677,
5
+ "train_runtime": 384.8987,
6
+ "train_samples": 6750,
7
+ "train_samples_per_second": 17.537,
8
+ "train_steps_per_second": 0.138
9
  }
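Looking back at tokenizer_config.json above: the chat_template it keeps unchanged is the Zephyr format, wrapping each turn in a <|system|>/<|user|>/<|assistant|> header terminated by the EOS token and appending a trailing <|assistant|> when a generation prompt is requested. A minimal usage sketch via transformers' apply_chat_template; the checkpoint path is a placeholder:

```python
from transformers import AutoTokenizer

# Placeholder repo id / local path; substitute the actual checkpoint location.
tokenizer = AutoTokenizer.from_pretrained("path/to/zephyr-7b-dpo-full")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize DPO in one sentence."},
]

# add_generation_prompt=True triggers the template's loop.last branch,
# appending the final '<|assistant|>' marker.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```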
trainer_state.json CHANGED
@@ -1,21 +1,25 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.99830220713073,
5
  "eval_steps": 100,
6
- "global_step": 441,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.0,
13
- "learning_rate": 1.111111111111111e-08,
14
- "logits/chosen": -2.827775478363037,
15
- "logits/rejected": -2.806304931640625,
16
- "logps/chosen": -362.28790283203125,
17
- "logps/rejected": -205.56106567382812,
18
- "loss": 0.2753,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -23,700 +27,124 @@
23
  "step": 1
24
  },
25
  {
26
- "epoch": 0.02,
27
- "learning_rate": 1.111111111111111e-07,
28
- "logits/chosen": -2.802647829055786,
29
- "logits/rejected": -2.7721259593963623,
30
- "logps/chosen": -319.9109191894531,
31
- "logps/rejected": -176.29066467285156,
32
- "loss": 0.2791,
33
- "rewards/accuracies": 0.4340277910232544,
34
- "rewards/chosen": 0.00017140436102636158,
35
- "rewards/margins": -0.00022869586246088147,
36
- "rewards/rejected": 0.00040010022348724306,
37
  "step": 10
38
  },
39
  {
40
- "epoch": 0.05,
41
- "learning_rate": 2.222222222222222e-07,
42
- "logits/chosen": -2.779694080352783,
43
- "logits/rejected": -2.74702787399292,
44
- "logps/chosen": -311.74652099609375,
45
- "logps/rejected": -177.87513732910156,
46
- "loss": 0.276,
47
- "rewards/accuracies": 0.643750011920929,
48
- "rewards/chosen": 0.008742044679820538,
49
- "rewards/margins": 0.014466273598372936,
50
- "rewards/rejected": -0.005724229384213686,
51
  "step": 20
52
  },
53
  {
54
- "epoch": 0.07,
55
- "learning_rate": 3.333333333333333e-07,
56
- "logits/chosen": -2.742488384246826,
57
- "logits/rejected": -2.722848415374756,
58
- "logps/chosen": -313.6725769042969,
59
- "logps/rejected": -186.76075744628906,
60
- "loss": 0.2771,
61
- "rewards/accuracies": 0.628125011920929,
62
- "rewards/chosen": 0.020179977640509605,
63
- "rewards/margins": 0.06532613933086395,
64
- "rewards/rejected": -0.0451461598277092,
65
  "step": 30
66
  },
67
  {
68
- "epoch": 0.09,
69
- "learning_rate": 4.444444444444444e-07,
70
- "logits/chosen": -2.594104528427124,
71
- "logits/rejected": -2.5780692100524902,
72
- "logps/chosen": -353.83160400390625,
73
- "logps/rejected": -217.767578125,
74
- "loss": 0.2573,
75
- "rewards/accuracies": 0.690625011920929,
76
- "rewards/chosen": -0.0030881466809660196,
77
- "rewards/margins": 0.2199195921421051,
78
- "rewards/rejected": -0.22300772368907928,
79
  "step": 40
80
  },
81
  {
82
- "epoch": 0.11,
83
- "learning_rate": 4.998033461515242e-07,
84
- "logits/chosen": -2.5498433113098145,
85
- "logits/rejected": -2.5390689373016357,
86
- "logps/chosen": -369.34344482421875,
87
- "logps/rejected": -245.7183380126953,
88
- "loss": 0.2109,
89
- "rewards/accuracies": 0.6499999761581421,
90
- "rewards/chosen": -0.1884610652923584,
91
- "rewards/margins": 0.33448106050491333,
92
- "rewards/rejected": -0.5229421257972717,
93
  "step": 50
94
  },
95
- {
96
- "epoch": 0.14,
97
- "learning_rate": 4.982319711683221e-07,
98
- "logits/chosen": -2.5170645713806152,
99
- "logits/rejected": -2.4964470863342285,
100
- "logps/chosen": -390.71759033203125,
101
- "logps/rejected": -284.5617370605469,
102
- "loss": 0.1661,
103
- "rewards/accuracies": 0.6812499761581421,
104
- "rewards/chosen": -0.3339163661003113,
105
- "rewards/margins": 0.5272020101547241,
106
- "rewards/rejected": -0.8611184358596802,
107
- "step": 60
108
- },
109
- {
110
- "epoch": 0.16,
111
- "learning_rate": 4.950991058546892e-07,
112
- "logits/chosen": -2.5328118801116943,
113
- "logits/rejected": -2.4936556816101074,
114
- "logps/chosen": -401.39605712890625,
115
- "logps/rejected": -309.35333251953125,
116
- "loss": 0.1375,
117
- "rewards/accuracies": 0.699999988079071,
118
- "rewards/chosen": -0.46392518281936646,
119
- "rewards/margins": 0.6156176924705505,
120
- "rewards/rejected": -1.079542875289917,
121
- "step": 70
122
- },
123
- {
124
- "epoch": 0.18,
125
- "learning_rate": 4.904244573372733e-07,
126
- "logits/chosen": -2.540886402130127,
127
- "logits/rejected": -2.495255708694458,
128
- "logps/chosen": -454.4031677246094,
129
- "logps/rejected": -335.4547119140625,
130
- "loss": 0.1093,
131
- "rewards/accuracies": 0.7437499761581421,
132
- "rewards/chosen": -0.48765724897384644,
133
- "rewards/margins": 0.8079099655151367,
134
- "rewards/rejected": -1.2955673933029175,
135
- "step": 80
136
- },
137
- {
138
- "epoch": 0.2,
139
- "learning_rate": 4.842374312499405e-07,
140
- "logits/chosen": -2.4646222591400146,
141
- "logits/rejected": -2.417442560195923,
142
- "logps/chosen": -409.55828857421875,
143
- "logps/rejected": -335.1234130859375,
144
- "loss": 0.0864,
145
- "rewards/accuracies": 0.7093750238418579,
146
- "rewards/chosen": -0.7838326692581177,
147
- "rewards/margins": 0.7940716743469238,
148
- "rewards/rejected": -1.5779043436050415,
149
- "step": 90
150
- },
151
- {
152
- "epoch": 0.23,
153
- "learning_rate": 4.7657694675916247e-07,
154
- "logits/chosen": -2.3520429134368896,
155
- "logits/rejected": -2.3132762908935547,
156
- "logps/chosen": -412.2884826660156,
157
- "logps/rejected": -369.1390075683594,
158
- "loss": 0.0816,
159
- "rewards/accuracies": 0.731249988079071,
160
- "rewards/chosen": -0.9074515104293823,
161
- "rewards/margins": 0.9162980318069458,
162
- "rewards/rejected": -1.8237495422363281,
163
- "step": 100
164
- },
165
- {
166
- "epoch": 0.23,
167
- "eval_logits/chosen": -2.341198444366455,
168
- "eval_logits/rejected": -2.322673797607422,
169
- "eval_logps/chosen": -390.20770263671875,
170
- "eval_logps/rejected": -432.539794921875,
171
- "eval_loss": 0.09001600742340088,
172
- "eval_rewards/accuracies": 0.66015625,
173
- "eval_rewards/chosen": -1.3316810131072998,
174
- "eval_rewards/margins": 0.4201836585998535,
175
- "eval_rewards/rejected": -1.7518646717071533,
176
- "eval_runtime": 53.5036,
177
- "eval_samples_per_second": 37.381,
178
- "eval_steps_per_second": 0.598,
179
- "step": 100
180
- },
181
- {
182
- "epoch": 0.25,
183
- "learning_rate": 4.6749119174501973e-07,
184
- "logits/chosen": -2.2996747493743896,
185
- "logits/rejected": -2.242867946624756,
186
- "logps/chosen": -462.94598388671875,
187
- "logps/rejected": -403.91363525390625,
188
- "loss": 0.0729,
189
- "rewards/accuracies": 0.768750011920929,
190
- "rewards/chosen": -0.8766400218009949,
191
- "rewards/margins": 1.1215176582336426,
192
- "rewards/rejected": -1.9981575012207031,
193
- "step": 110
194
- },
195
- {
196
- "epoch": 0.27,
197
- "learning_rate": 4.5703731967784265e-07,
198
- "logits/chosen": -2.2242417335510254,
199
- "logits/rejected": -2.181910276412964,
200
- "logps/chosen": -449.12713623046875,
201
- "logps/rejected": -427.40740966796875,
202
- "loss": 0.0685,
203
- "rewards/accuracies": 0.7124999761581421,
204
- "rewards/chosen": -1.2070833444595337,
205
- "rewards/margins": 0.9871729612350464,
206
- "rewards/rejected": -2.19425630569458,
207
- "step": 120
208
- },
209
- {
210
- "epoch": 0.29,
211
- "learning_rate": 4.4528109009727333e-07,
212
- "logits/chosen": -2.1515939235687256,
213
- "logits/rejected": -2.0918548107147217,
214
- "logps/chosen": -467.0848693847656,
215
- "logps/rejected": -428.3409118652344,
216
- "loss": 0.0531,
217
- "rewards/accuracies": 0.7093750238418579,
218
- "rewards/chosen": -1.561851978302002,
219
- "rewards/margins": 0.9053736925125122,
220
- "rewards/rejected": -2.4672255516052246,
221
- "step": 130
222
- },
223
- {
224
- "epoch": 0.32,
225
- "learning_rate": 4.3229645495529427e-07,
226
- "logits/chosen": -2.2256131172180176,
227
- "logits/rejected": -2.1506736278533936,
228
- "logps/chosen": -481.4261169433594,
229
- "logps/rejected": -418.9735412597656,
230
- "loss": 0.0496,
231
- "rewards/accuracies": 0.7281249761581421,
232
- "rewards/chosen": -1.3596832752227783,
233
- "rewards/margins": 1.1094987392425537,
234
- "rewards/rejected": -2.469182014465332,
235
- "step": 140
236
- },
237
- {
238
- "epoch": 0.34,
239
- "learning_rate": 4.1816509342531317e-07,
240
- "logits/chosen": -2.2947065830230713,
241
- "logits/rejected": -2.2372803688049316,
242
- "logps/chosen": -467.123291015625,
243
- "logps/rejected": -395.8599548339844,
244
- "loss": 0.0605,
245
- "rewards/accuracies": 0.737500011920929,
246
- "rewards/chosen": -1.2707955837249756,
247
- "rewards/margins": 0.942121148109436,
248
- "rewards/rejected": -2.212916851043701,
249
- "step": 150
250
- },
251
- {
252
- "epoch": 0.36,
253
- "learning_rate": 4.0297589810356166e-07,
254
- "logits/chosen": -2.233494281768799,
255
- "logits/rejected": -2.1654052734375,
256
- "logps/chosen": -485.3228454589844,
257
- "logps/rejected": -422.75555419921875,
258
- "loss": 0.0551,
259
- "rewards/accuracies": 0.715624988079071,
260
- "rewards/chosen": -1.3737380504608154,
261
- "rewards/margins": 1.092675805091858,
262
- "rewards/rejected": -2.466413974761963,
263
- "step": 160
264
- },
265
- {
266
- "epoch": 0.38,
267
- "learning_rate": 3.868244158348331e-07,
268
- "logits/chosen": -2.2172982692718506,
269
- "logits/rejected": -2.1708760261535645,
270
- "logps/chosen": -494.2137145996094,
271
- "logps/rejected": -449.77587890625,
272
- "loss": 0.0577,
273
- "rewards/accuracies": 0.778124988079071,
274
- "rewards/chosen": -1.3581817150115967,
275
- "rewards/margins": 1.175022840499878,
276
- "rewards/rejected": -2.5332045555114746,
277
- "step": 170
278
- },
279
- {
280
- "epoch": 0.41,
281
- "learning_rate": 3.698122466800142e-07,
282
- "logits/chosen": -2.196737766265869,
283
- "logits/rejected": -2.150078296661377,
284
- "logps/chosen": -453.3573303222656,
285
- "logps/rejected": -426.41143798828125,
286
- "loss": 0.0658,
287
- "rewards/accuracies": 0.7593749761581421,
288
- "rewards/chosen": -1.144860029220581,
289
- "rewards/margins": 1.1531283855438232,
290
- "rewards/rejected": -2.2979884147644043,
291
- "step": 180
292
- },
293
- {
294
- "epoch": 0.43,
295
- "learning_rate": 3.5204640480617574e-07,
296
- "logits/chosen": -2.1627323627471924,
297
- "logits/rejected": -2.110278367996216,
298
- "logps/chosen": -524.0073852539062,
299
- "logps/rejected": -460.2293395996094,
300
- "loss": 0.0492,
301
- "rewards/accuracies": 0.768750011920929,
302
- "rewards/chosen": -1.415473222732544,
303
- "rewards/margins": 1.2913919687271118,
304
- "rewards/rejected": -2.7068653106689453,
305
- "step": 190
306
- },
307
- {
308
- "epoch": 0.45,
309
- "learning_rate": 3.336386453195088e-07,
310
- "logits/chosen": -2.1464579105377197,
311
- "logits/rejected": -2.082240581512451,
312
- "logps/chosen": -505.35919189453125,
313
- "logps/rejected": -458.5272521972656,
314
- "loss": 0.0436,
315
- "rewards/accuracies": 0.75,
316
- "rewards/chosen": -1.7200065851211548,
317
- "rewards/margins": 1.1776483058929443,
318
- "rewards/rejected": -2.8976550102233887,
319
- "step": 200
320
- },
321
- {
322
- "epoch": 0.45,
323
- "eval_logits/chosen": -2.1454780101776123,
324
- "eval_logits/rejected": -2.1204702854156494,
325
- "eval_logps/chosen": -462.9208679199219,
326
- "eval_logps/rejected": -526.5885009765625,
327
- "eval_loss": 0.059836775064468384,
328
- "eval_rewards/accuracies": 0.69140625,
329
- "eval_rewards/chosen": -2.058812379837036,
330
- "eval_rewards/margins": 0.6335402131080627,
331
- "eval_rewards/rejected": -2.692352533340454,
332
- "eval_runtime": 53.3575,
333
- "eval_samples_per_second": 37.483,
334
- "eval_steps_per_second": 0.6,
335
- "step": 200
336
- },
337
- {
338
- "epoch": 0.48,
339
- "learning_rate": 3.147047612756302e-07,
340
- "logits/chosen": -2.164459705352783,
341
- "logits/rejected": -2.11089825630188,
342
- "logps/chosen": -475.6734924316406,
343
- "logps/rejected": -459.15673828125,
344
- "loss": 0.0505,
345
- "rewards/accuracies": 0.721875011920929,
346
- "rewards/chosen": -1.560291051864624,
347
- "rewards/margins": 1.194120168685913,
348
- "rewards/rejected": -2.754411220550537,
349
- "step": 210
350
- },
351
- {
352
- "epoch": 0.5,
353
- "learning_rate": 2.9536385528937565e-07,
354
- "logits/chosen": -2.1479196548461914,
355
- "logits/rejected": -2.0868375301361084,
356
- "logps/chosen": -458.2518005371094,
357
- "logps/rejected": -436.82305908203125,
358
- "loss": 0.0575,
359
- "rewards/accuracies": 0.7437499761581421,
360
- "rewards/chosen": -1.339440941810608,
361
- "rewards/margins": 1.1341434717178345,
362
- "rewards/rejected": -2.4735846519470215,
363
- "step": 220
364
- },
365
- {
366
- "epoch": 0.52,
367
- "learning_rate": 2.7573759032598365e-07,
368
- "logits/chosen": -2.1152689456939697,
369
- "logits/rejected": -2.074328899383545,
370
- "logps/chosen": -492.90380859375,
371
- "logps/rejected": -454.63873291015625,
372
- "loss": 0.0565,
373
- "rewards/accuracies": 0.7437499761581421,
374
- "rewards/chosen": -1.4534698724746704,
375
- "rewards/margins": 1.1257164478302002,
376
- "rewards/rejected": -2.579186201095581,
377
- "step": 230
378
- },
379
- {
380
- "epoch": 0.54,
381
- "learning_rate": 2.5594942438652685e-07,
382
- "logits/chosen": -2.1014912128448486,
383
- "logits/rejected": -2.049712657928467,
384
- "logps/chosen": -508.5966796875,
385
- "logps/rejected": -468.09991455078125,
386
- "loss": 0.0471,
387
- "rewards/accuracies": 0.7562500238418579,
388
- "rewards/chosen": -1.5375745296478271,
389
- "rewards/margins": 1.1159954071044922,
390
- "rewards/rejected": -2.6535699367523193,
391
- "step": 240
392
- },
393
- {
394
- "epoch": 0.57,
395
- "learning_rate": 2.36123833901765e-07,
396
- "logits/chosen": -2.1314926147460938,
397
- "logits/rejected": -2.077198028564453,
398
- "logps/chosen": -519.6549072265625,
399
- "logps/rejected": -478.80889892578125,
400
- "loss": 0.0445,
401
- "rewards/accuracies": 0.746874988079071,
402
- "rewards/chosen": -1.766661286354065,
403
- "rewards/margins": 1.2140429019927979,
404
- "rewards/rejected": -2.9807040691375732,
405
- "step": 250
406
- },
407
- {
408
- "epoch": 0.59,
409
- "learning_rate": 2.1638553071961704e-07,
410
- "logits/chosen": -2.159257650375366,
411
- "logits/rejected": -2.0843465328216553,
412
- "logps/chosen": -498.5057678222656,
413
- "logps/rejected": -481.7154235839844,
414
- "loss": 0.0462,
415
- "rewards/accuracies": 0.793749988079071,
416
- "rewards/chosen": -1.548032522201538,
417
- "rewards/margins": 1.3328226804733276,
418
- "rewards/rejected": -2.8808555603027344,
419
- "step": 260
420
- },
421
- {
422
- "epoch": 0.61,
423
- "learning_rate": 1.968586776117558e-07,
424
- "logits/chosen": -2.1956114768981934,
425
- "logits/rejected": -2.1366395950317383,
426
- "logps/chosen": -484.2972717285156,
427
- "logps/rejected": -429.1397399902344,
428
- "loss": 0.0537,
429
- "rewards/accuracies": 0.71875,
430
- "rewards/chosen": -1.4318220615386963,
431
- "rewards/margins": 1.0592107772827148,
432
- "rewards/rejected": -2.491032838821411,
433
- "step": 270
434
- },
435
- {
436
- "epoch": 0.63,
437
- "learning_rate": 1.7766610723413684e-07,
438
- "logits/chosen": -2.1730809211730957,
439
- "logits/rejected": -2.1212735176086426,
440
- "logps/chosen": -490.05908203125,
441
- "logps/rejected": -451.37628173828125,
442
- "loss": 0.0562,
443
- "rewards/accuracies": 0.784375011920929,
444
- "rewards/chosen": -1.4677350521087646,
445
- "rewards/margins": 1.121753215789795,
446
- "rewards/rejected": -2.5894885063171387,
447
- "step": 280
448
- },
449
- {
450
- "epoch": 0.66,
451
- "learning_rate": 1.589285494545514e-07,
452
- "logits/chosen": -2.1455771923065186,
453
- "logits/rejected": -2.096673011779785,
454
- "logps/chosen": -530.82421875,
455
- "logps/rejected": -489.6752014160156,
456
- "loss": 0.0405,
457
- "rewards/accuracies": 0.765625,
458
- "rewards/chosen": -1.7482846975326538,
459
- "rewards/margins": 1.2687255144119263,
460
- "rewards/rejected": -3.01701021194458,
461
- "step": 290
462
- },
463
- {
464
- "epoch": 0.68,
465
- "learning_rate": 1.4076387190766014e-07,
466
- "logits/chosen": -2.1248068809509277,
467
- "logits/rejected": -2.0664258003234863,
468
- "logps/chosen": -537.5523681640625,
469
- "logps/rejected": -512.0633544921875,
470
- "loss": 0.0388,
471
- "rewards/accuracies": 0.703125,
472
- "rewards/chosen": -2.0803182125091553,
473
- "rewards/margins": 1.1989731788635254,
474
- "rewards/rejected": -3.2792911529541016,
475
- "step": 300
476
- },
477
- {
478
- "epoch": 0.68,
479
- "eval_logits/chosen": -2.125394821166992,
480
- "eval_logits/rejected": -2.1019349098205566,
481
- "eval_logps/chosen": -496.0135192871094,
482
- "eval_logps/rejected": -569.3626708984375,
483
- "eval_loss": 0.051513027399778366,
484
- "eval_rewards/accuracies": 0.69140625,
485
- "eval_rewards/chosen": -2.3897383213043213,
486
- "eval_rewards/margins": 0.7303550243377686,
487
- "eval_rewards/rejected": -3.120093822479248,
488
- "eval_runtime": 53.3835,
489
- "eval_samples_per_second": 37.465,
490
- "eval_steps_per_second": 0.599,
491
- "step": 300
492
- },
493
- {
494
- "epoch": 0.7,
495
- "learning_rate": 1.232863385547543e-07,
496
- "logits/chosen": -2.138700246810913,
497
- "logits/rejected": -2.0852296352386475,
498
- "logps/chosen": -534.0220947265625,
499
- "logps/rejected": -511.69708251953125,
500
- "loss": 0.0391,
501
- "rewards/accuracies": 0.78125,
502
- "rewards/chosen": -1.956750512123108,
503
- "rewards/margins": 1.3101288080215454,
504
- "rewards/rejected": -3.266878843307495,
505
- "step": 310
506
- },
507
- {
508
- "epoch": 0.72,
509
- "learning_rate": 1.0660589091223854e-07,
510
- "logits/chosen": -2.1349263191223145,
511
- "logits/rejected": -2.077669620513916,
512
- "logps/chosen": -520.9324340820312,
513
- "logps/rejected": -508.90594482421875,
514
- "loss": 0.0403,
515
- "rewards/accuracies": 0.75,
516
- "rewards/chosen": -1.970788598060608,
517
- "rewards/margins": 1.172662615776062,
518
- "rewards/rejected": -3.14345121383667,
519
- "step": 320
520
- },
521
- {
522
- "epoch": 0.75,
523
- "learning_rate": 9.082745647022797e-08,
524
- "logits/chosen": -2.1367201805114746,
525
- "logits/rejected": -2.0910043716430664,
526
- "logps/chosen": -503.3335876464844,
527
- "logps/rejected": -482.1080627441406,
528
- "loss": 0.0424,
529
- "rewards/accuracies": 0.784375011920929,
530
- "rewards/chosen": -1.7588704824447632,
531
- "rewards/margins": 1.2970627546310425,
532
- "rewards/rejected": -3.0559334754943848,
533
- "step": 330
534
- },
535
- {
536
- "epoch": 0.77,
537
- "learning_rate": 7.605028865161809e-08,
538
- "logits/chosen": -2.1519930362701416,
539
- "logits/rejected": -2.074977397918701,
540
- "logps/chosen": -539.51708984375,
541
- "logps/rejected": -511.9608459472656,
542
- "loss": 0.0443,
543
- "rewards/accuracies": 0.762499988079071,
544
- "rewards/chosen": -1.8557418584823608,
545
- "rewards/margins": 1.2462117671966553,
546
- "rewards/rejected": -3.1019535064697266,
547
- "step": 340
548
- },
549
- {
550
- "epoch": 0.79,
551
- "learning_rate": 6.236734246357947e-08,
552
- "logits/chosen": -2.1703686714172363,
553
- "logits/rejected": -2.1259913444519043,
554
- "logps/chosen": -537.4775390625,
555
- "logps/rejected": -482.252685546875,
556
- "loss": 0.0441,
557
- "rewards/accuracies": 0.746874988079071,
558
- "rewards/chosen": -1.876011610031128,
559
- "rewards/margins": 1.030928611755371,
560
- "rewards/rejected": -2.906940221786499,
561
- "step": 350
562
- },
563
- {
564
- "epoch": 0.81,
565
- "learning_rate": 4.986468976890992e-08,
566
- "logits/chosen": -2.1170713901519775,
567
- "logits/rejected": -2.0758965015411377,
568
- "logps/chosen": -507.7197265625,
569
- "logps/rejected": -487.06494140625,
570
- "loss": 0.0391,
571
- "rewards/accuracies": 0.753125011920929,
572
- "rewards/chosen": -1.775130033493042,
573
- "rewards/margins": 1.3335883617401123,
574
- "rewards/rejected": -3.1087188720703125,
575
- "step": 360
576
- },
577
- {
578
- "epoch": 0.84,
579
- "learning_rate": 3.8620977855448936e-08,
580
- "logits/chosen": -2.1223392486572266,
581
- "logits/rejected": -2.07041072845459,
582
- "logps/chosen": -520.2659912109375,
583
- "logps/rejected": -504.3395080566406,
584
- "loss": 0.0399,
585
- "rewards/accuracies": 0.7749999761581421,
586
- "rewards/chosen": -1.8968217372894287,
587
- "rewards/margins": 1.2073428630828857,
588
- "rewards/rejected": -3.1041648387908936,
589
- "step": 370
590
- },
591
- {
592
- "epoch": 0.86,
593
- "learning_rate": 2.8706934709395893e-08,
594
- "logits/chosen": -2.122580051422119,
595
- "logits/rejected": -2.0831775665283203,
596
- "logps/chosen": -529.0001831054688,
597
- "logps/rejected": -509.4918518066406,
598
- "loss": 0.0402,
599
- "rewards/accuracies": 0.746874988079071,
600
- "rewards/chosen": -1.9584871530532837,
601
- "rewards/margins": 1.2146377563476562,
602
- "rewards/rejected": -3.1731247901916504,
603
- "step": 380
604
- },
605
- {
606
- "epoch": 0.88,
607
- "learning_rate": 2.0184924104583612e-08,
608
- "logits/chosen": -2.136026620864868,
609
- "logits/rejected": -2.085597515106201,
610
- "logps/chosen": -521.5689697265625,
611
- "logps/rejected": -499.8408203125,
612
- "loss": 0.0378,
613
- "rewards/accuracies": 0.7749999761581421,
614
- "rewards/chosen": -1.883663535118103,
615
- "rewards/margins": 1.2730777263641357,
616
- "rewards/rejected": -3.156741142272949,
617
- "step": 390
618
- },
619
- {
620
- "epoch": 0.91,
621
- "learning_rate": 1.3108553306396263e-08,
622
- "logits/chosen": -2.1042487621307373,
623
- "logits/rejected": -2.0541131496429443,
624
- "logps/chosen": -535.3380126953125,
625
- "logps/rejected": -510.76416015625,
626
- "loss": 0.0379,
627
- "rewards/accuracies": 0.737500011920929,
628
- "rewards/chosen": -1.9656769037246704,
629
- "rewards/margins": 1.2311311960220337,
630
- "rewards/rejected": -3.196807861328125,
631
- "step": 400
632
- },
633
- {
634
- "epoch": 0.91,
635
- "eval_logits/chosen": -2.1270744800567627,
636
- "eval_logits/rejected": -2.102363348007202,
637
- "eval_logps/chosen": -489.1372375488281,
638
- "eval_logps/rejected": -561.4609375,
639
- "eval_loss": 0.05125967413187027,
640
- "eval_rewards/accuracies": 0.71484375,
641
- "eval_rewards/chosen": -2.3209760189056396,
642
- "eval_rewards/margins": 0.7201008796691895,
643
- "eval_rewards/rejected": -3.041076898574829,
644
- "eval_runtime": 53.3435,
645
- "eval_samples_per_second": 37.493,
646
- "eval_steps_per_second": 0.6,
647
- "step": 400
648
- },
649
- {
650
- "epoch": 0.93,
651
- "learning_rate": 7.522335858048705e-09,
652
- "logits/chosen": -2.139943838119507,
653
- "logits/rejected": -2.093489408493042,
654
- "logps/chosen": -541.4684448242188,
655
- "logps/rejected": -513.9920654296875,
656
- "loss": 0.0401,
657
- "rewards/accuracies": 0.737500011920929,
658
- "rewards/chosen": -1.8892101049423218,
659
- "rewards/margins": 1.2535432577133179,
660
- "rewards/rejected": -3.1427536010742188,
661
- "step": 410
662
- },
663
- {
664
- "epoch": 0.95,
665
- "learning_rate": 3.4614115704533766e-09,
666
- "logits/chosen": -2.1335110664367676,
667
- "logits/rejected": -2.0755324363708496,
668
- "logps/chosen": -552.4773559570312,
669
- "logps/rejected": -511.8482971191406,
670
- "loss": 0.0364,
671
- "rewards/accuracies": 0.731249988079071,
672
- "rewards/chosen": -1.868373155593872,
673
- "rewards/margins": 1.291067123413086,
674
- "rewards/rejected": -3.159440279006958,
675
- "step": 420
676
- },
677
- {
678
- "epoch": 0.97,
679
- "learning_rate": 9.513254770636137e-10,
680
- "logits/chosen": -2.1106972694396973,
681
- "logits/rejected": -2.043785810470581,
682
- "logps/chosen": -515.1985473632812,
683
- "logps/rejected": -489.65399169921875,
684
- "loss": 0.0408,
685
- "rewards/accuracies": 0.737500011920929,
686
- "rewards/chosen": -1.9040443897247314,
687
- "rewards/margins": 1.2098912000656128,
688
- "rewards/rejected": -3.1139354705810547,
689
- "step": 430
690
- },
691
  {
692
  "epoch": 1.0,
693
- "learning_rate": 7.867144166728844e-12,
694
- "logits/chosen": -2.0860373973846436,
695
- "logits/rejected": -2.0547218322753906,
696
- "logps/chosen": -530.7337646484375,
697
- "logps/rejected": -510.04888916015625,
698
- "loss": 0.0426,
699
- "rewards/accuracies": 0.765625,
700
- "rewards/chosen": -1.9042129516601562,
701
- "rewards/margins": 1.2389646768569946,
702
- "rewards/rejected": -3.1431777477264404,
703
- "step": 440
704
- },
705
- {
706
- "epoch": 1.0,
707
- "step": 441,
708
  "total_flos": 0.0,
709
- "train_loss": 0.07976685402716369,
710
- "train_runtime": 7402.7266,
711
- "train_samples_per_second": 15.268,
712
- "train_steps_per_second": 0.06
713
  }
714
  ],
715
  "logging_steps": 10,
716
- "max_steps": 441,
 
717
  "num_train_epochs": 1,
718
  "save_steps": 100,
 
719
  "total_flos": 0.0,
 
720
  "trial_name": null,
721
  "trial_params": null
722
  }
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
  "eval_steps": 100,
6
+ "global_step": 53,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "debug/losses": 0.17192834615707397,
13
+ "debug/policy_weights": 0.24804016947746277,
14
+ "debug/raw_losses": 0.6931471824645996,
15
+ "epoch": 0.018867924528301886,
16
+ "grad_norm": 3.0791833143219045,
17
+ "learning_rate": 8.333333333333333e-08,
18
+ "logits/chosen": -2.855412006378174,
19
+ "logits/rejected": -2.8797199726104736,
20
+ "logps/chosen": -320.43853759765625,
21
+ "logps/rejected": -340.07073974609375,
22
+ "loss": 0.2116,
23
  "rewards/accuracies": 0.0,
24
  "rewards/chosen": 0.0,
25
  "rewards/margins": 0.0,
 
27
  "step": 1
28
  },
29
  {
30
+ "debug/losses": 0.20367620885372162,
31
+ "debug/policy_weights": 0.2948996424674988,
32
+ "debug/raw_losses": 0.6906173229217529,
33
+ "epoch": 0.18867924528301888,
34
+ "grad_norm": 3.2025034006962603,
35
+ "learning_rate": 4.911172937635942e-07,
36
+ "logits/chosen": -2.8658909797668457,
37
+ "logits/rejected": -2.8917548656463623,
38
+ "logps/chosen": -305.7406005859375,
39
+ "logps/rejected": -332.2490234375,
40
+ "loss": 0.2033,
41
+ "rewards/accuracies": 0.4791666567325592,
42
+ "rewards/chosen": 0.0008342999499291182,
43
+ "rewards/margins": 0.005153011996299028,
44
+ "rewards/rejected": -0.004318712279200554,
45
  "step": 10
46
  },
47
  {
48
+ "debug/losses": 0.20357565581798553,
49
+ "debug/policy_weights": 0.30120497941970825,
50
+ "debug/raw_losses": 0.675395131111145,
51
+ "epoch": 0.37735849056603776,
52
+ "grad_norm": 3.097721440067098,
53
+ "learning_rate": 3.982949361823388e-07,
54
+ "logits/chosen": -2.855691432952881,
55
+ "logits/rejected": -2.8575425148010254,
56
+ "logps/chosen": -324.3226623535156,
57
+ "logps/rejected": -331.01007080078125,
58
+ "loss": 0.2068,
59
+ "rewards/accuracies": 0.612500011920929,
60
+ "rewards/chosen": 0.0029448498971760273,
61
+ "rewards/margins": 0.03993762657046318,
62
+ "rewards/rejected": -0.03699277713894844,
63
  "step": 20
64
  },
65
  {
66
+ "debug/losses": 0.20828036963939667,
67
+ "debug/policy_weights": 0.3203299641609192,
68
+ "debug/raw_losses": 0.6491612195968628,
69
+ "epoch": 0.5660377358490566,
70
+ "grad_norm": 3.2758930059808353,
71
+ "learning_rate": 2.416462557480814e-07,
72
+ "logits/chosen": -2.839444398880005,
73
+ "logits/rejected": -2.842738628387451,
74
+ "logps/chosen": -298.2297668457031,
75
+ "logps/rejected": -313.391845703125,
76
+ "loss": 0.2007,
77
+ "rewards/accuracies": 0.731249988079071,
78
+ "rewards/chosen": -0.014156119897961617,
79
+ "rewards/margins": 0.11547034978866577,
80
+ "rewards/rejected": -0.12962646782398224,
81
  "step": 30
82
  },
83
  {
84
+ "debug/losses": 0.18270191550254822,
85
+ "debug/policy_weights": 0.30252760648727417,
86
+ "debug/raw_losses": 0.584720253944397,
87
+ "epoch": 0.7547169811320755,
88
+ "grad_norm": 2.7163808332522805,
89
+ "learning_rate": 8.859303711029939e-08,
90
+ "logits/chosen": -2.842470645904541,
91
+ "logits/rejected": -2.8394596576690674,
92
+ "logps/chosen": -290.64208984375,
93
+ "logps/rejected": -320.837890625,
94
+ "loss": 0.1963,
95
+ "rewards/accuracies": 0.731249988079071,
96
+ "rewards/chosen": -0.011087085120379925,
97
+ "rewards/margins": 0.30588188767433167,
98
+ "rewards/rejected": -0.31696897745132446,
99
  "step": 40
100
  },
101
  {
102
+ "debug/losses": 0.23133957386016846,
103
+ "debug/policy_weights": 0.3536807894706726,
104
+ "debug/raw_losses": 0.6469973921775818,
105
+ "epoch": 0.9433962264150944,
106
+ "grad_norm": 3.004599614335292,
107
+ "learning_rate": 5.009573740853313e-09,
108
+ "logits/chosen": -2.8772711753845215,
109
+ "logits/rejected": -2.864253520965576,
110
+ "logps/chosen": -282.04022216796875,
111
+ "logps/rejected": -316.0839538574219,
112
+ "loss": 0.1939,
113
+ "rewards/accuracies": 0.668749988079071,
114
+ "rewards/chosen": -0.027098428457975388,
115
+ "rewards/margins": 0.1554117500782013,
116
+ "rewards/rejected": -0.1825101673603058,
117
  "step": 50
118
  },
119
  {
120
  "epoch": 1.0,
121
+ "step": 53,
122
  "total_flos": 0.0,
123
+ "train_loss": 0.20092295110225677,
124
+ "train_runtime": 384.8987,
125
+ "train_samples_per_second": 17.537,
126
+ "train_steps_per_second": 0.138
127
  }
128
  ],
129
  "logging_steps": 10,
130
+ "max_steps": 53,
131
+ "num_input_tokens_seen": 0,
132
  "num_train_epochs": 1,
133
  "save_steps": 100,
134
+ "stateful_callbacks": {
135
+ "TrainerControl": {
136
+ "args": {
137
+ "should_epoch_stop": false,
138
+ "should_evaluate": false,
139
+ "should_log": false,
140
+ "should_save": false,
141
+ "should_training_stop": false
142
+ },
143
+ "attributes": {}
144
+ }
145
+ },
146
  "total_flos": 0.0,
147
+ "train_batch_size": 8,
148
  "trial_name": null,
149
  "trial_params": null
150
  }
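The rewritten log_history adds grad_norm and three debug/* columns alongside the usual DPO reward metrics. Numerically, debug/losses equals debug/raw_losses × debug/policy_weights in the entries above (e.g. 0.6931 × 0.2480 ≈ 0.1719 at step 1), which suggests a per-pair weight applied to a plain DPO-style objective; the weighting itself is not reconstructed here. Below is a hedged sketch of the standard bookkeeping behind the rewards/* columns only — beta is an assumed placeholder, and this is not claimed to be the exact loss used for this run:

```python
import torch
import torch.nn.functional as F

def dpo_reward_stats(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps, beta=0.01):
    """Standard DPO reward bookkeeping, mirroring the rewards/* log columns."""
    rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)        # rewards/chosen
    rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)  # rewards/rejected
    margins = rewards_chosen - rewards_rejected                             # rewards/margins
    raw_loss = -F.logsigmoid(margins)                                       # per-pair DPO loss
    accuracy = (margins > 0).float().mean()                                 # rewards/accuracies
    return raw_loss.mean(), rewards_chosen.mean(), rewards_rejected.mean(), accuracy

# At initialization the policy equals the reference, so rewards and accuracy are
# zero and the raw loss is log(2) ~= 0.6931 -- matching the step-1 entry above.
zeros = torch.zeros(4)
print(dpo_reward_stats(zeros, zeros, zeros, zeros))
```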
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c8c151d7becb900bed631e41dff74cbdb1243adce5d3b22205a355b75f2b0912
3
- size 5944
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ead67c9eb9d466ab2cc1bdbe4406c9d56939d72f781464822bd099547b2d5fd8
3
+ size 6456