hllj committed on
Commit
058bd06
1 Parent(s): 4a317fb

Model save

README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ base_model: hllj/mistral-vi-math
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: sft-mistral-v3-all
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft-mistral-v3-all
+
+ This model is a fine-tuned version of [hllj/mistral-vi-math](https://huggingface.co/hllj/mistral-vi-math) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4937
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 2
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
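
For reference, the hyperparameters listed in the card map onto a `transformers.TrainingArguments` object roughly as follows (a minimal sketch, not part of the committed files; `output_dir` is taken from `config_argument.yaml` below, and the Adam settings and multi-GPU handling are the Trainer defaults):

```python
from transformers import TrainingArguments

# Sketch of the training arguments implied by the hyperparameter list above;
# "Native AMP" mixed precision corresponds to fp16=True here.
training_args = TrainingArguments(
    output_dir="outputs-sft-mistral-v3-all",  # from config_argument.yaml
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    num_train_epochs=2,
    fp16=True,
)
```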
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "hllj/mistral-vi-math",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 256,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "k_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
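
The adapter configuration above can be reproduced with `peft.LoraConfig` roughly as follows (a sketch mirroring the JSON values; it is not part of the committed files):

```python
from peft import LoraConfig

# LoRA settings equivalent to adapter_config.json: rank 256, alpha 128,
# dropout 0.05, applied to the attention projections of the base model.
lora_config = LoraConfig(
    r=256,
    lora_alpha=128,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```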
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f6bca230ef43184c05ba5106e1b5978b26c04de78285775fc6de35254c0b91e
+ size 872450448
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 1.45,
+ "eval_loss": 0.4937315583229065,
+ "eval_runtime": 81.9976,
+ "eval_samples": 852,
+ "eval_samples_per_second": 10.391,
+ "eval_steps_per_second": 1.305,
+ "train_loss": 0.3165048904197161,
+ "train_runtime": 4971.9971,
+ "train_samples": 7665,
+ "train_samples_per_second": 3.083,
+ "train_steps_per_second": 0.386
+ }
config_argument.yaml ADDED
@@ -0,0 +1,47 @@
+ cache_dir: ./cache
+ ddp_find_unused_parameters: false
+ ddp_timeout: 30000
+ device_map: auto
+ do_eval: true
+ do_train: true
+ eval_steps: 1000
+ evaluation_strategy: steps
+ fp16: true
+ gradient_accumulation_steps: 1
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ hub_model_id: hllj/sft-mistral-v3-all
+ hub_strategy: every_save
+ learning_rate: 5.0e-05
+ log_level: info
+ logging_first_step: true
+ logging_steps: 10
+ logging_strategy: steps
+ lora_alpha: 128
+ lora_dropout: 0.05
+ lora_r: 256
+ lora_target_modules: all
+ lr_scheduler_type: cosine
+ max_seq_length: 1024
+ model_name_or_path: hllj/mistral-vi-math
+ model_type: auto
+ num_train_epochs: 2
+ output_dir: outputs-sft-mistral-v3-all
+ overwrite_output_dir: true
+ per_device_eval_batch_size: 8
+ per_device_train_batch_size: 8
+ preprocessing_num_workers: 4
+ push_to_hub: true
+ report_to: wandb
+ run_name: sft-mistral-v3-all
+ save_steps: 1000
+ save_strategy: steps
+ save_total_limit: 13
+ seed: 42
+ token: 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
+ torch_dtype: float16
+ train_file_dir: datasets/finetune
+ use_peft: true
+ warmup_ratio: 0.05
+ weight_decay: 0.05
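
As a usage illustration, the adapter pushed to `hub_model_id` above can be attached to the base model for inference roughly like this (a sketch; the prompt and generation settings are illustrative, not taken from the training run):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fp16 base model, then attach the LoRA adapter from the Hub.
base = AutoModelForCausalLM.from_pretrained(
    "hllj/mistral-vi-math",
    torch_dtype=torch.float16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "hllj/sft-mistral-v3-all")
tokenizer = AutoTokenizer.from_pretrained("hllj/sft-mistral-v3-all")

prompt = "Giải phương trình: 2x + 3 = 7"  # illustrative Vietnamese math prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```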
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.45,
+ "eval_loss": 0.4937315583229065,
+ "eval_runtime": 81.9976,
+ "eval_samples": 852,
+ "eval_samples_per_second": 10.391,
+ "eval_steps_per_second": 1.305
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<unk>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<unk>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": true
+ }
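
A small check of the tokenizer settings above (a sketch; the repository id is the `hub_model_id` from `config_argument.yaml`): with `add_bos_token: true` and `add_eos_token: false`, encoded sequences start with `<s>` but do not end with `</s>`, and `<unk>` doubles as the padding token.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hllj/sft-mistral-v3-all")

ids = tokenizer("Xin chào").input_ids
print(ids[0] == tokenizer.bos_token_id)   # True: <s> is prepended
print(ids[-1] == tokenizer.eos_token_id)  # False: </s> is not appended
print(tokenizer.pad_token)                # "<unk>", per special_tokens_map.json
```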
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 1.45,
+ "train_loss": 0.3165048904197161,
+ "train_runtime": 4971.9971,
+ "train_samples": 7665,
+ "train_samples_per_second": 3.083,
+ "train_steps_per_second": 0.386
+ }
trainer_state.json ADDED
@@ -0,0 +1,550 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.448383733055266,
+ "eval_steps": 1000,
+ "global_step": 860,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 5.208333333333334e-07,
+ "loss": 0.7516,
+ "step": 1
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 5.208333333333334e-06,
+ "loss": 0.6842,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 1.0416666666666668e-05,
+ "loss": 0.6229,
+ "step": 20
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.5266,
+ "step": 30
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 2.0833333333333336e-05,
+ "loss": 0.4158,
+ "step": 40
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 2.604166666666667e-05,
+ "loss": 0.3737,
+ "step": 50
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 3.125e-05,
+ "loss": 0.3821,
+ "step": 60
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 3.6458333333333336e-05,
+ "loss": 0.3576,
+ "step": 70
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.166666666666667e-05,
+ "loss": 0.351,
+ "step": 80
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.6875e-05,
+ "loss": 0.3453,
+ "step": 90
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 4.999940539127957e-05,
+ "loss": 0.3554,
+ "step": 100
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.999271636800334e-05,
+ "loss": 0.3586,
+ "step": 110
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.997859705581399e-05,
+ "loss": 0.3445,
+ "step": 120
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 4.995705165235725e-05,
+ "loss": 0.3476,
+ "step": 130
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.992808656304221e-05,
+ "loss": 0.33,
+ "step": 140
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.9891710399136984e-05,
+ "loss": 0.3354,
+ "step": 150
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.984793397520864e-05,
+ "loss": 0.3356,
+ "step": 160
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 4.9796770305908045e-05,
+ "loss": 0.3216,
+ "step": 170
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.97382346021006e-05,
+ "loss": 0.3353,
+ "step": 180
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.96723442663441e-05,
+ "loss": 0.3216,
+ "step": 190
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 4.959911888771496e-05,
+ "loss": 0.3251,
+ "step": 200
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.951858023598448e-05,
+ "loss": 0.3324,
+ "step": 210
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.943075225514667e-05,
+ "loss": 0.3269,
+ "step": 220
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9335661056299755e-05,
+ "loss": 0.3298,
+ "step": 230
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 4.923333490988343e-05,
+ "loss": 0.3154,
+ "step": 240
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.912380423727405e-05,
+ "loss": 0.3195,
+ "step": 250
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.900710160174048e-05,
+ "loss": 0.3215,
+ "step": 260
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.888326169876304e-05,
+ "loss": 0.3378,
+ "step": 270
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 4.875232134571862e-05,
+ "loss": 0.3254,
+ "step": 280
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8614319470934935e-05,
+ "loss": 0.3513,
+ "step": 290
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.846929710211724e-05,
+ "loss": 0.3355,
+ "step": 300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.83172973541508e-05,
+ "loss": 0.3238,
+ "step": 310
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 4.815836541628299e-05,
+ "loss": 0.325,
+ "step": 320
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.7992548538688556e-05,
+ "loss": 0.3256,
+ "step": 330
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.7819896018422237e-05,
+ "loss": 0.3273,
+ "step": 340
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.764045918476288e-05,
+ "loss": 0.3086,
+ "step": 350
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.7454291383953285e-05,
+ "loss": 0.3171,
+ "step": 360
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.726144796334049e-05,
+ "loss": 0.3241,
+ "step": 370
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.706198625492111e-05,
+ "loss": 0.3194,
+ "step": 380
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 4.685596555829663e-05,
+ "loss": 0.316,
+ "step": 390
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.664344712304375e-05,
+ "loss": 0.315,
+ "step": 400
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.642449413050498e-05,
+ "loss": 0.3209,
+ "step": 410
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.619917167500496e-05,
+ "loss": 0.3146,
+ "step": 420
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 4.5967546744498046e-05,
+ "loss": 0.3066,
+ "step": 430
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 4.572968820065287e-05,
+ "loss": 0.3024,
+ "step": 440
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 4.5485666758379956e-05,
+ "loss": 0.3074,
+ "step": 450
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 4.5235554964808235e-05,
+ "loss": 0.2973,
+ "step": 460
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 4.497942717771698e-05,
+ "loss": 0.2799,
+ "step": 470
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 4.4717359543429316e-05,
+ "loss": 0.291,
+ "step": 480
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 4.444942997417412e-05,
+ "loss": 0.2803,
+ "step": 490
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 4.417571812492279e-05,
+ "loss": 0.2679,
+ "step": 500
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 4.389630536970805e-05,
+ "loss": 0.2799,
+ "step": 510
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 4.361127477743151e-05,
+ "loss": 0.2911,
+ "step": 520
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 4.332071108716747e-05,
+ "loss": 0.2858,
+ "step": 530
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 4.302470068297019e-05,
+ "loss": 0.2927,
+ "step": 540
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 4.2723331568192e-05,
+ "loss": 0.3027,
+ "step": 550
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 4.2416693339320115e-05,
+ "loss": 0.2884,
+ "step": 560
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 4.2104877159339726e-05,
+ "loss": 0.305,
+ "step": 570
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 4.178797573063144e-05,
+ "loss": 0.2814,
+ "step": 580
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 4.1466083267411005e-05,
+ "loss": 0.2742,
+ "step": 590
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 4.113929546771963e-05,
+ "loss": 0.2727,
+ "step": 600
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 4.080770948497311e-05,
+ "loss": 0.3017,
+ "step": 610
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 4.047142389907827e-05,
+ "loss": 0.2684,
+ "step": 620
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 4.0130538687125394e-05,
+ "loss": 0.2782,
+ "step": 630
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.978515519366519e-05,
+ "loss": 0.2852,
+ "step": 640
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 3.943537610057921e-05,
+ "loss": 0.2858,
+ "step": 650
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.908130539655278e-05,
+ "loss": 0.2803,
+ "step": 660
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.8723048346159285e-05,
+ "loss": 0.2844,
+ "step": 670
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.836071145856526e-05,
+ "loss": 0.2787,
+ "step": 680
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 3.7994402455865376e-05,
+ "loss": 0.2719,
+ "step": 690
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.762423024105685e-05,
+ "loss": 0.2798,
+ "step": 700
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.7250304865662857e-05,
+ "loss": 0.2811,
+ "step": 710
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 3.6872737497014286e-05,
+ "loss": 0.2672,
+ "step": 720
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.649164038520001e-05,
+ "loss": 0.2732,
+ "step": 730
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 3.6107126829695094e-05,
+ "loss": 0.2759,
+ "step": 740
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 3.5719311145677055e-05,
+ "loss": 0.2617,
+ "step": 750
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 3.532830863004018e-05,
+ "loss": 0.2809,
+ "step": 760
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 3.4934235527118e-05,
+ "loss": 0.2662,
+ "step": 770
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 3.4537208994124014e-05,
+ "loss": 0.2496,
+ "step": 780
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 3.41373470663211e-05,
+ "loss": 0.2761,
+ "step": 790
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 3.37347686219298e-05,
+ "loss": 0.2621,
+ "step": 800
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 3.3329593346786125e-05,
+ "loss": 0.264,
+ "step": 810
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 3.292194169875908e-05,
+ "loss": 0.2552,
+ "step": 820
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 3.251193487193883e-05,
+ "loss": 0.2675,
+ "step": 830
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.209969476060587e-05,
+ "loss": 0.2614,
+ "step": 840
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.168534392299214e-05,
+ "loss": 0.2721,
+ "step": 850
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.126900554484459e-05,
+ "loss": 0.2751,
+ "step": 860
+ },
+ {
+ "epoch": 1.45,
+ "step": 860,
+ "total_flos": 3.091617472671908e+17,
+ "train_loss": 0.3165048904197161,
+ "train_runtime": 4971.9971,
+ "train_samples_per_second": 3.083,
+ "train_steps_per_second": 0.386
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 1918,
+ "num_train_epochs": 2,
+ "save_steps": 1000,
+ "total_flos": 3.091617472671908e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c3afe54135ad942380dee2eef4388fa893295c951f98e6eb91b191f536cecc4
+ size 4664