wangjiawei2003 committed
Commit 7c2d9fe
1 Parent(s): 069781d

Training in progress, epoch 5

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6b9d9dd5e42dd37a12d1ef0a5fe8a0efec6601918626ed828ae9beab9a6b9fb3
+ oid sha256:96fcea88f4e36116893152c1b8c23693d3da1aba8314e57af9ebf53256a551d9
  size 267832560
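
Note: the safetensors entry above is only a Git LFS pointer (object hash plus byte size); the ~268 MB weight blob itself lives in LFS storage. A minimal sketch of pulling the real file at this exact revision with huggingface_hub, assuming the repo id is wangjiawei2003/distilbert-base-uncased-finetuned-sst2 (inferred from the checkpoint path in trainer_state.json below, not stated on this page):

from huggingface_hub import hf_hub_download

# Assumed repo id; "7c2d9fe" is the short hash of this commit.
weights_path = hf_hub_download(
    repo_id="wangjiawei2003/distilbert-base-uncased-finetuned-sst2",
    filename="model.safetensors",
    revision="7c2d9fe",
)
print(weights_path)  # local cache path of the resolved safetensors file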
run-0/checkpoint-21050/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2",
+   "vocab_size": 30522
+ }
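
Note: because the config above sits inside a full checkpoint directory (weights, tokenizer files, trainer state), the checkpoint can be loaded directly with transformers. A minimal sketch, assuming the files have been materialized locally under run-0/checkpoint-21050:

from transformers import AutoConfig, AutoModelForSequenceClassification

ckpt_dir = "run-0/checkpoint-21050"  # assumed local path to this checkpoint

config = AutoConfig.from_pretrained(ckpt_dir)
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)

# Reflects the values in config.json above: distilbert, 6 layers, 12 heads.
print(config.model_type, config.n_layers, config.n_heads)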
run-0/checkpoint-21050/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96fcea88f4e36116893152c1b8c23693d3da1aba8314e57af9ebf53256a551d9
+ size 267832560
run-0/checkpoint-21050/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a6d9b9670f56c83a4bd7d02564cf7ac162928f517f0667ddcbffec58bb017e0
+ size 535727290
run-0/checkpoint-21050/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05db813fc280afb4e8ae7f51e765f3105c228f2196254b6e7bd4f99ca3f9cd2f
+ size 14244
run-0/checkpoint-21050/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9aee36023f19a47d9bf0a07232ef86681148093588b2dcc7872b4f7989d0307
+ size 1064
run-0/checkpoint-21050/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
run-0/checkpoint-21050/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-0/checkpoint-21050/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
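
Note: the tokenizer files in this checkpoint mirror the stock distilbert-base-uncased vocabulary and special tokens. A short sketch of loading and using them, with the same assumed local checkpoint path as above:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("run-0/checkpoint-21050")  # assumed local path

# [CLS]/[SEP] from special_tokens_map.json are inserted automatically;
# model_max_length (512) comes from tokenizer_config.json.
enc = tokenizer("a touching and funny film", truncation=True)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))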
run-0/checkpoint-21050/trainer_state.json ADDED
@@ -0,0 +1,365 @@
+ {
+   "best_metric": 0.9036697247706422,
+   "best_model_checkpoint": "distilbert-base-uncased-finetuned-sst2/run-0/checkpoint-4210",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 21050,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1187648456057007,
+       "grad_norm": 9.763530731201172,
+       "learning_rate": 2.4130303407723998e-05,
+       "loss": 0.3459,
+       "step": 500
+     },
+     {
+       "epoch": 0.2375296912114014,
+       "grad_norm": 7.90731954574585,
+       "learning_rate": 2.3543191402669885e-05,
+       "loss": 0.2557,
+       "step": 1000
+     },
+     {
+       "epoch": 0.35629453681710216,
+       "grad_norm": 7.188111305236816,
+       "learning_rate": 2.2956079397615776e-05,
+       "loss": 0.2242,
+       "step": 1500
+     },
+     {
+       "epoch": 0.4750593824228028,
+       "grad_norm": 9.416199684143066,
+       "learning_rate": 2.2368967392561663e-05,
+       "loss": 0.2281,
+       "step": 2000
+     },
+     {
+       "epoch": 0.5938242280285035,
+       "grad_norm": 6.982799053192139,
+       "learning_rate": 2.1781855387507554e-05,
+       "loss": 0.2196,
+       "step": 2500
+     },
+     {
+       "epoch": 0.7125890736342043,
+       "grad_norm": 3.475909948348999,
+       "learning_rate": 2.1194743382453438e-05,
+       "loss": 0.199,
+       "step": 3000
+     },
+     {
+       "epoch": 0.831353919239905,
+       "grad_norm": 18.930234909057617,
+       "learning_rate": 2.0607631377399328e-05,
+       "loss": 0.1988,
+       "step": 3500
+     },
+     {
+       "epoch": 0.9501187648456056,
+       "grad_norm": 21.44248390197754,
+       "learning_rate": 2.0020519372345215e-05,
+       "loss": 0.1964,
+       "step": 4000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9036697247706422,
+       "eval_loss": 0.279630184173584,
+       "eval_runtime": 1.1981,
+       "eval_samples_per_second": 727.831,
+       "eval_steps_per_second": 45.907,
+       "step": 4210
+     },
+     {
+       "epoch": 1.0688836104513064,
+       "grad_norm": 6.991998672485352,
+       "learning_rate": 1.9433407367291103e-05,
+       "loss": 0.1449,
+       "step": 4500
+     },
+     {
+       "epoch": 1.187648456057007,
+       "grad_norm": 1.0682291984558105,
+       "learning_rate": 1.8846295362236993e-05,
+       "loss": 0.1307,
+       "step": 5000
+     },
+     {
+       "epoch": 1.3064133016627077,
+       "grad_norm": 1.4475390911102295,
+       "learning_rate": 1.825918335718288e-05,
+       "loss": 0.1248,
+       "step": 5500
+     },
+     {
+       "epoch": 1.4251781472684084,
+       "grad_norm": 0.4122050702571869,
+       "learning_rate": 1.767207135212877e-05,
+       "loss": 0.1288,
+       "step": 6000
+     },
+     {
+       "epoch": 1.5439429928741093,
+       "grad_norm": 0.35570672154426575,
+       "learning_rate": 1.7084959347074655e-05,
+       "loss": 0.1353,
+       "step": 6500
+     },
+     {
+       "epoch": 1.66270783847981,
+       "grad_norm": 0.9978006482124329,
+       "learning_rate": 1.6497847342020545e-05,
+       "loss": 0.1316,
+       "step": 7000
+     },
+     {
+       "epoch": 1.7814726840855108,
+       "grad_norm": 7.568444728851318,
+       "learning_rate": 1.5910735336966433e-05,
+       "loss": 0.1363,
+       "step": 7500
+     },
+     {
+       "epoch": 1.9002375296912115,
+       "grad_norm": 0.1252310872077942,
+       "learning_rate": 1.532362333191232e-05,
+       "loss": 0.1271,
+       "step": 8000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.8887614678899083,
+       "eval_loss": 0.37789419293403625,
+       "eval_runtime": 1.2,
+       "eval_samples_per_second": 726.651,
+       "eval_steps_per_second": 45.832,
+       "step": 8420
+     },
+     {
+       "epoch": 2.019002375296912,
+       "grad_norm": 0.2641623616218567,
+       "learning_rate": 1.4736511326858209e-05,
+       "loss": 0.114,
+       "step": 8500
+     },
+     {
+       "epoch": 2.137767220902613,
+       "grad_norm": 16.91781234741211,
+       "learning_rate": 1.4149399321804098e-05,
+       "loss": 0.0814,
+       "step": 9000
+     },
+     {
+       "epoch": 2.2565320665083135,
+       "grad_norm": 0.13691911101341248,
+       "learning_rate": 1.3562287316749987e-05,
+       "loss": 0.0807,
+       "step": 9500
+     },
+     {
+       "epoch": 2.375296912114014,
+       "grad_norm": 14.652079582214355,
+       "learning_rate": 1.2975175311695872e-05,
+       "loss": 0.0909,
+       "step": 10000
+     },
+     {
+       "epoch": 2.494061757719715,
+       "grad_norm": 0.23481616377830505,
+       "learning_rate": 1.2388063306641761e-05,
+       "loss": 0.0823,
+       "step": 10500
+     },
+     {
+       "epoch": 2.6128266033254155,
+       "grad_norm": 0.16778022050857544,
+       "learning_rate": 1.1800951301587648e-05,
+       "loss": 0.0866,
+       "step": 11000
+     },
+     {
+       "epoch": 2.731591448931116,
+       "grad_norm": 22.94564437866211,
+       "learning_rate": 1.1213839296533537e-05,
+       "loss": 0.0894,
+       "step": 11500
+     },
+     {
+       "epoch": 2.850356294536817,
+       "grad_norm": 4.668305397033691,
+       "learning_rate": 1.0626727291479424e-05,
+       "loss": 0.0969,
+       "step": 12000
+     },
+     {
+       "epoch": 2.969121140142518,
+       "grad_norm": 0.056590911000967026,
+       "learning_rate": 1.0039615286425313e-05,
+       "loss": 0.0932,
+       "step": 12500
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.8990825688073395,
+       "eval_loss": 0.4282897412776947,
+       "eval_runtime": 1.1944,
+       "eval_samples_per_second": 730.051,
+       "eval_steps_per_second": 46.047,
+       "step": 12630
+     },
+     {
+       "epoch": 3.0878859857482186,
+       "grad_norm": 0.023897308856248856,
+       "learning_rate": 9.452503281371202e-06,
+       "loss": 0.0537,
+       "step": 13000
+     },
+     {
+       "epoch": 3.2066508313539193,
+       "grad_norm": 0.7962842583656311,
+       "learning_rate": 8.86539127631709e-06,
+       "loss": 0.0473,
+       "step": 13500
+     },
+     {
+       "epoch": 3.32541567695962,
+       "grad_norm": 0.47386619448661804,
+       "learning_rate": 8.278279271262978e-06,
+       "loss": 0.0608,
+       "step": 14000
+     },
+     {
+       "epoch": 3.4441805225653206,
+       "grad_norm": 0.06599200516939163,
+       "learning_rate": 7.691167266208865e-06,
+       "loss": 0.0677,
+       "step": 14500
+     },
+     {
+       "epoch": 3.5629453681710213,
+       "grad_norm": 0.18340125679969788,
+       "learning_rate": 7.104055261154754e-06,
+       "loss": 0.0579,
+       "step": 15000
+     },
+     {
+       "epoch": 3.6817102137767224,
+       "grad_norm": 5.418150901794434,
+       "learning_rate": 6.5169432561006415e-06,
+       "loss": 0.0517,
+       "step": 15500
+     },
+     {
+       "epoch": 3.800475059382423,
+       "grad_norm": 19.50031852722168,
+       "learning_rate": 5.92983125104653e-06,
+       "loss": 0.0617,
+       "step": 16000
+     },
+     {
+       "epoch": 3.9192399049881237,
+       "grad_norm": 0.0843653604388237,
+       "learning_rate": 5.3427192459924185e-06,
+       "loss": 0.0599,
+       "step": 16500
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9036697247706422,
+       "eval_loss": 0.466791033744812,
+       "eval_runtime": 1.2178,
+       "eval_samples_per_second": 716.054,
+       "eval_steps_per_second": 45.164,
+       "step": 16840
+     },
+     {
+       "epoch": 4.038004750593824,
+       "grad_norm": 0.04484863579273224,
+       "learning_rate": 4.7556072409383065e-06,
+       "loss": 0.0424,
+       "step": 17000
+     },
+     {
+       "epoch": 4.156769596199525,
+       "grad_norm": 0.0025116826873272657,
+       "learning_rate": 4.1684952358841946e-06,
+       "loss": 0.0335,
+       "step": 17500
+     },
+     {
+       "epoch": 4.275534441805226,
+       "grad_norm": 0.4570842981338501,
+       "learning_rate": 3.581383230830083e-06,
+       "loss": 0.0395,
+       "step": 18000
+     },
+     {
+       "epoch": 4.394299287410926,
+       "grad_norm": 0.7941423654556274,
+       "learning_rate": 2.9942712257759707e-06,
+       "loss": 0.0334,
+       "step": 18500
+     },
+     {
+       "epoch": 4.513064133016627,
+       "grad_norm": 3.7694590091705322,
+       "learning_rate": 2.4071592207218587e-06,
+       "loss": 0.035,
+       "step": 19000
+     },
+     {
+       "epoch": 4.631828978622328,
+       "grad_norm": 0.018865738064050674,
+       "learning_rate": 1.8200472156677468e-06,
+       "loss": 0.0373,
+       "step": 19500
+     },
+     {
+       "epoch": 4.750593824228028,
+       "grad_norm": 0.03485831245779991,
+       "learning_rate": 1.232935210613635e-06,
+       "loss": 0.0282,
+       "step": 20000
+     },
+     {
+       "epoch": 4.869358669833729,
+       "grad_norm": 0.014678980223834515,
+       "learning_rate": 6.458232055595231e-07,
+       "loss": 0.0369,
+       "step": 20500
+     },
+     {
+       "epoch": 4.98812351543943,
+       "grad_norm": 0.06822077184915543,
+       "learning_rate": 5.871120050541119e-08,
+       "loss": 0.0299,
+       "step": 21000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9025229357798165,
+       "eval_loss": 0.5252613425254822,
+       "eval_runtime": 1.204,
+       "eval_samples_per_second": 724.232,
+       "eval_steps_per_second": 45.68,
+       "step": 21050
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 21050,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "total_flos": 3061523906193576.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 2.471741541277811e-05,
+     "num_train_epochs": 5,
+     "per_device_train_batch_size": 16,
+     "seed": 27
+   }
+ }
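
Note: trainer_state.json is plain JSON, so the evaluation history logged above (accuracy 0.9037 at epoch 1, dipping at epoch 2 and recovering through epoch 5) can be inspected without transformers. A short sketch, using the same assumed local checkpoint path:

import json

with open("run-0/checkpoint-21050/trainer_state.json") as f:
    state = json.load(f)

# Only evaluation entries in log_history carry eval_* keys.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "->", state["best_model_checkpoint"])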
run-0/checkpoint-21050/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0ec7297f5c06e907634b65b0c3c004d7bc361b158c4a0a3f342fbd4132ec430
+ size 5048
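
Note: training_args.bin is a pickled TrainingArguments object rather than text, so it has to be deserialized to inspect. A hedged sketch (transformers must be importable for unpickling, and weights_only=False is required on recent PyTorch because this is an arbitrary Python object, so only load files you trust):

import torch

# Assumed local path; yields the TrainingArguments used for this hyperparameter-search trial.
args = torch.load("run-0/checkpoint-21050/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size, args.seed)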
run-0/checkpoint-21050/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
runs/May14_13-42-22_d0b6fd4b0e89/events.out.tfevents.1715696155.d0b6fd4b0e89.396.2 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5a788597ce7ab2e38d3ac99dd15e6250b755ae3e9182ad7ff7bc74b52e7dedc4
- size 13118
+ oid sha256:7cd19ee9a1365e3a5f7f91c33bd72268dcb9317fc3ed1c838f2dae76e7e77f7b
+ size 15527