bimabk committed on
Commit da9124f (verified)
1 Parent(s): 308c212

Upload task output ac92fa52-28b8-479a-b5d5-a678407b5011

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 896,
+   "initializer_range": 0.02,
+   "intermediate_size": 4864,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 24,
+   "model_type": "qwen2",
+   "num_attention_heads": 14,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "unsloth_version": "2024.9",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
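
For reference, a minimal sketch (not part of the commit) of loading this Qwen2 configuration and checkpoint with transformers; "path/to/checkpoint" is a placeholder for a local clone of this repo or its Hub repo id:

# Sketch only: load the config and weights uploaded in this commit.
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

checkpoint = "path/to/checkpoint"  # placeholder, substitute the actual path or repo id

config = AutoConfig.from_pretrained(checkpoint)
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # qwen2 896 24

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16)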
generation_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "max_length": 32768,
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.51.3"
+ }
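
This file only sets sampling defaults; a short sketch of how they are read back (assumed usage, "path/to/checkpoint" is a placeholder as above):

# Sketch: read the sampling defaults shipped in generation_config.json.
from transformers import GenerationConfig

checkpoint = "path/to/checkpoint"  # placeholder
gen_cfg = GenerationConfig.from_pretrained(checkpoint)
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)
# True 0.7 0.8 20 -- model.generate() applies these unless overridden per call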
loss.txt ADDED
@@ -0,0 +1 @@
+ 166,1.4646453857421875
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28c55f72a64a9f2fb4d72715f0ef031374c9a1a790f1e960ded5c1e3f6618fcd
+ size 988097824
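
This is a Git LFS pointer, not the weights themselves; the real file is fetched on `git lfs pull`. A small sketch for verifying a downloaded model.safetensors against the oid and size recorded above:

# Sketch: check a local model.safetensors against the LFS pointer.
import hashlib
import os

path = "model.safetensors"  # local path after git lfs pull / hf_hub_download
expected_oid = "28c55f72a64a9f2fb4d72715f0ef031374c9a1a790f1e960ded5c1e3f6618fcd"
expected_size = 988097824

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"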
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcfe42da0a4497e8b2b172c1f9f4ec423a46dc12907f4349c55025f670422ba9
+ size 11418266
tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "left",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
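
The chat_template above is ChatML-style: it wraps each message in <|im_start|>/<|im_end|> and injects a default system prompt when none is supplied. A minimal usage sketch ("path/to/checkpoint" is a placeholder for this repo):

# Sketch: render a conversation with the chat template from this tokenizer_config.
from transformers import AutoTokenizer

checkpoint = "path/to/checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant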
trainer_state.json ADDED
@@ -0,0 +1,281 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9761904761904763,
+   "eval_steps": 500,
+   "global_step": 166,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05952380952380952,
+       "grad_norm": 7.34375,
+       "learning_rate": 8.815157634604766e-06,
+       "loss": 1.8887,
+       "step": 5
+     },
+     {
+       "epoch": 0.11904761904761904,
+       "grad_norm": 3.90625,
+       "learning_rate": 1.983410467786072e-05,
+       "loss": 1.718,
+       "step": 10
+     },
+     {
+       "epoch": 0.17857142857142858,
+       "grad_norm": 2.796875,
+       "learning_rate": 3.085305172111668e-05,
+       "loss": 1.5748,
+       "step": 15
+     },
+     {
+       "epoch": 0.23809523809523808,
+       "grad_norm": 2.625,
+       "learning_rate": 4.187199876437263e-05,
+       "loss": 1.5417,
+       "step": 20
+     },
+     {
+       "epoch": 0.2976190476190476,
+       "grad_norm": 2.1875,
+       "learning_rate": 5.28909458076286e-05,
+       "loss": 1.5065,
+       "step": 25
+     },
+     {
+       "epoch": 0.35714285714285715,
+       "grad_norm": 2.359375,
+       "learning_rate": 6.390989285088455e-05,
+       "loss": 1.4201,
+       "step": 30
+     },
+     {
+       "epoch": 0.4166666666666667,
+       "grad_norm": 2.265625,
+       "learning_rate": 7.492883989414051e-05,
+       "loss": 1.4179,
+       "step": 35
+     },
+     {
+       "epoch": 0.47619047619047616,
+       "grad_norm": 2.25,
+       "learning_rate": 7.708414308620465e-05,
+       "loss": 1.3922,
+       "step": 40
+     },
+     {
+       "epoch": 0.5357142857142857,
+       "grad_norm": 1.8671875,
+       "learning_rate": 7.688744639389479e-05,
+       "loss": 1.3509,
+       "step": 45
+     },
+     {
+       "epoch": 0.5952380952380952,
+       "grad_norm": 2.59375,
+       "learning_rate": 7.654053825785006e-05,
+       "loss": 1.3861,
+       "step": 50
+     },
+     {
+       "epoch": 0.6547619047619048,
+       "grad_norm": 2.40625,
+       "learning_rate": 7.604523563706609e-05,
+       "loss": 1.3591,
+       "step": 55
+     },
+     {
+       "epoch": 0.7142857142857143,
+       "grad_norm": 2.046875,
+       "learning_rate": 7.540413271847275e-05,
+       "loss": 1.3542,
+       "step": 60
+     },
+     {
+       "epoch": 0.7738095238095238,
+       "grad_norm": 1.9375,
+       "learning_rate": 7.462058732967345e-05,
+       "loss": 1.3694,
+       "step": 65
+     },
+     {
+       "epoch": 0.8333333333333334,
+       "grad_norm": 1.9375,
+       "learning_rate": 7.369870335205563e-05,
+       "loss": 1.3377,
+       "step": 70
+     },
+     {
+       "epoch": 0.8928571428571429,
+       "grad_norm": 2.0625,
+       "learning_rate": 7.264330922638506e-05,
+       "loss": 1.2981,
+       "step": 75
+     },
+     {
+       "epoch": 0.9523809523809523,
+       "grad_norm": 1.9140625,
+       "learning_rate": 7.145993266346274e-05,
+       "loss": 1.3101,
+       "step": 80
+     },
+     {
+       "epoch": 0.9880952380952381,
+       "eval_loss": 1.5184645652770996,
+       "eval_runtime": 6.8095,
+       "eval_samples_per_second": 29.371,
+       "eval_steps_per_second": 29.371,
+       "step": 83
+     },
+     {
+       "epoch": 1.0119047619047619,
+       "grad_norm": 2.125,
+       "learning_rate": 7.015477169229925e-05,
+       "loss": 1.2726,
+       "step": 85
+     },
+     {
+       "epoch": 1.0714285714285714,
+       "grad_norm": 2.0625,
+       "learning_rate": 6.873466219744416e-05,
+       "loss": 1.2178,
+       "step": 90
+     },
+     {
+       "epoch": 1.130952380952381,
+       "grad_norm": 2.015625,
+       "learning_rate": 6.72070421154961e-05,
+       "loss": 1.1864,
+       "step": 95
+     },
+     {
+       "epoch": 1.1904761904761905,
+       "grad_norm": 1.8828125,
+       "learning_rate": 6.55799124783174e-05,
+       "loss": 1.1736,
+       "step": 100
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.9375,
+       "learning_rate": 6.386179550699237e-05,
+       "loss": 1.179,
+       "step": 105
+     },
+     {
+       "epoch": 1.3095238095238095,
+       "grad_norm": 1.953125,
+       "learning_rate": 6.206168997601557e-05,
+       "loss": 1.1931,
+       "step": 110
+     },
+     {
+       "epoch": 1.369047619047619,
+       "grad_norm": 1.8125,
+       "learning_rate": 6.0189024081493556e-05,
+       "loss": 1.1593,
+       "step": 115
+     },
+     {
+       "epoch": 1.4285714285714286,
+       "grad_norm": 2.1875,
+       "learning_rate": 5.825360606021676e-05,
+       "loss": 1.1416,
+       "step": 120
+     },
+     {
+       "epoch": 1.4880952380952381,
+       "grad_norm": 1.828125,
+       "learning_rate": 5.626557281823805e-05,
+       "loss": 1.1742,
+       "step": 125
+     },
+     {
+       "epoch": 1.5476190476190477,
+       "grad_norm": 1.6640625,
+       "learning_rate": 5.423533683801979e-05,
+       "loss": 1.172,
+       "step": 130
+     },
+     {
+       "epoch": 1.6071428571428572,
+       "grad_norm": 1.6171875,
+       "learning_rate": 5.2173531642227546e-05,
+       "loss": 1.1783,
+       "step": 135
+     },
+     {
+       "epoch": 1.6666666666666665,
+       "grad_norm": 1.8203125,
+       "learning_rate": 5.009095609980816e-05,
+       "loss": 1.1861,
+       "step": 140
+     },
+     {
+       "epoch": 1.7261904761904763,
+       "grad_norm": 1.6796875,
+       "learning_rate": 4.799851786605331e-05,
+       "loss": 1.1517,
+       "step": 145
+     },
+     {
+       "epoch": 1.7857142857142856,
+       "grad_norm": 1.609375,
+       "learning_rate": 4.590717625288627e-05,
+       "loss": 1.1703,
+       "step": 150
+     },
+     {
+       "epoch": 1.8452380952380953,
+       "grad_norm": 1.90625,
+       "learning_rate": 4.382788482859252e-05,
+       "loss": 1.1476,
+       "step": 155
+     },
+     {
+       "epoch": 1.9047619047619047,
+       "grad_norm": 1.7734375,
+       "learning_rate": 4.177153404763354e-05,
+       "loss": 1.1682,
+       "step": 160
+     },
+     {
+       "epoch": 1.9642857142857144,
+       "grad_norm": 1.6328125,
+       "learning_rate": 3.974889421102398e-05,
+       "loss": 1.1397,
+       "step": 165
+     },
+     {
+       "epoch": 1.9761904761904763,
+       "eval_loss": 1.4646453857421875,
+       "eval_runtime": 6.7251,
+       "eval_samples_per_second": 29.739,
+       "eval_steps_per_second": 29.739,
+       "step": 166
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 252,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.0175749958192333e+17,
+   "train_batch_size": 140,
+   "trial_name": null,
+   "trial_params": null
+ }
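
trainer_state.json is plain JSON, so the logged curves can be pulled out directly; a small sketch (assumes the file is in the working directory):

# Sketch: extract train/eval losses from the log_history above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train[-1])  # (165, 1.1397)
print(evals[-1])  # (166, 1.4646453857421875) -- matches loss.txt above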
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebe9af9cadaa6d212d9f4a97461ffa4480594d6bdb8d3fa080e3d0c70b3fad71
3
+ size 5688
vocab.json ADDED
The diff for this file is too large to render. See raw diff