moock committed on
Commit 23bc122
1 Parent(s): d3622ce

End of training

README.md ADDED
@@ -0,0 +1,96 @@
+ ---
+ license: apache-2.0
+ base_model: microsoft/swinv2-tiny-patch4-window8-256
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ model-index:
+ - name: swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: train
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.6428571428571429
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max
+
+ This model is a fine-tuned version of [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.0741
+ - Accuracy: 0.6429
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
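+ As a hedged usage sketch (the repo id below is an assumption, and the class labels "0"/"1"/"2" simply mirror config.json), inference can go through the standard `transformers` image-classification pipeline:
+
+ ```python
+ from transformers import pipeline
+
+ # Both the model id and the input image are placeholders; point them
+ # at the actual checkpoint location and a real image file.
+ classifier = pipeline(
+     "image-classification",
+     model="moock/swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max",
+ )
+ print(classifier("example.png"))  # list of {label, score} dicts
+ ```
+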
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged `TrainingArguments` sketch follows this list):
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 20
+
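+ A minimal sketch reproducing these settings with `transformers.TrainingArguments` (the output directory, and any argument not listed above, are assumptions; the Adam betas/epsilon are the defaults):
+
+ ```python
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max",  # assumed
+     learning_rate=5e-5,
+     per_device_train_batch_size=32,   # train_batch_size above
+     per_device_eval_batch_size=32,    # eval_batch_size above
+     seed=42,
+     gradient_accumulation_steps=4,    # 32 x 4 = 128 total train batch size
+     lr_scheduler_type="linear",
+     warmup_ratio=0.1,
+     num_train_epochs=20,
+ )
+ ```
+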
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 1.0925        | 0.94  | 11   | 1.0631          | 0.7952   |
+ | 0.9552        | 1.96  | 23   | 0.6336          | 0.7952   |
+ | 0.6566        | 2.98  | 35   | 0.5356          | 0.7952   |
+ | 0.5686        | 4.0   | 47   | 0.5150          | 0.7952   |
+ | 0.5703        | 4.94  | 58   | 0.5129          | 0.7952   |
+ | 0.5726        | 5.96  | 70   | 0.5154          | 0.7952   |
+ | 0.5482        | 6.98  | 82   | 0.5142          | 0.7952   |
+ | 0.568         | 8.0   | 94   | 0.5109          | 0.7952   |
+ | 0.5245        | 8.94  | 105  | 0.5134          | 0.7952   |
+ | 0.5979        | 9.96  | 117  | 0.5238          | 0.7952   |
+ | 0.5442        | 10.98 | 129  | 0.5076          | 0.7952   |
+ | 0.545         | 12.0  | 141  | 0.5062          | 0.7952   |
+ | 0.5514        | 12.94 | 152  | 0.5013          | 0.7952   |
+ | 0.5377        | 13.96 | 164  | 0.5045          | 0.7952   |
+ | 0.5282        | 14.98 | 176  | 0.5038          | 0.7952   |
+ | 0.5389        | 16.0  | 188  | 0.4994          | 0.7952   |
+ | 0.5039        | 16.94 | 199  | 0.4996          | 0.7952   |
+ | 0.5348        | 17.96 | 211  | 0.4940          | 0.7952   |
+ | 0.5426        | 18.72 | 220  | 0.4947          | 0.7952   |
+
+ Training stops at step 220 rather than at a round 20.0 epochs: with 47 batches per epoch and gradient_accumulation_steps 4, each epoch contributes 11 complete optimizer updates (47 // 4), so 20 epochs cap the run at 220 updates, which the Trainer logs as 220 / 11.75 ≈ 18.72 epochs.
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - PyTorch 2.1.2
+ - Datasets 2.16.0
+ - Tokenizers 0.15.0
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 18.72,
+   "eval_accuracy": 0.6428571428571429,
+   "eval_loss": 1.0741071701049805,
+   "eval_runtime": 4.5667,
+   "eval_samples_per_second": 52.116,
+   "eval_steps_per_second": 1.752,
+   "train_loss": 0.595783019065857,
+   "train_runtime": 1248.9487,
+   "train_samples_per_second": 23.796,
+   "train_steps_per_second": 0.176
+ }
config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "_name_or_path": "microsoft/swinv2-tiny-patch4-window8-256",
+   "architectures": [
+     "Swinv2ForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     6,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 96,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "0",
+     "1": "1",
+     "2": "2"
+   },
+   "image_size": 256,
+   "initializer_range": 0.02,
+   "label2id": {
+     "0": 0,
+     "1": 1,
+     "2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swinv2",
+   "num_channels": 3,
+   "num_heads": [
+     3,
+     6,
+     12,
+     24
+   ],
+   "num_layers": 4,
+   "patch_size": 4,
+   "path_norm": true,
+   "pretrained_window_sizes": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "use_absolute_embeddings": false,
+   "window_size": 8
+ }
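The config above pins the SwinV2-tiny topology (stage depths 2-2-6-2, window size 8, 256-px input) with a three-class head. As a sketch, assuming the repository files sit in the current working directory, the model can be rebuilt directly from them:

```python
from transformers import Swinv2ForImageClassification

# Sketch only: "." assumes config.json and model.safetensors are local.
model = Swinv2ForImageClassification.from_pretrained(".")
print(model.config.id2label)  # {0: '0', 1: '1', 2: '2'}
```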
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 18.72,
+   "eval_accuracy": 0.6428571428571429,
+   "eval_loss": 1.0741071701049805,
+   "eval_runtime": 4.5667,
+   "eval_samples_per_second": 52.116,
+   "eval_steps_per_second": 1.752
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65be09d36da40c6d8dfe6ffbb404e735f5bb14937d77df09ec924f331ce7c977
+ size 110353212
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 256,
+     "width": 256
+   }
+ }
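Per this config, inputs are resized to 256x256 with bicubic resampling (`resample: 3`), rescaled by 1/255, and normalized with the ImageNet mean/std. A minimal sketch of applying it (the local path and image file are placeholders):

```python
from PIL import Image
from transformers import AutoImageProcessor

# Sketch: "." assumes preprocessor_config.json is local; "example.png" is hypothetical.
processor = AutoImageProcessor.from_pretrained(".")
inputs = processor(images=Image.open("example.png").convert("RGB"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 256, 256])
```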
runs/Dec28_18-44-20_MacBook-Pro-de-Max-2.local/events.out.tfevents.1703785531.MacBook-Pro-de-Max-2.local.34768.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0aa379bd3a1e87cf0c81a8e1f4a21b8c1a7218b943676e2bbe60f853d919ec61
+ size 14628
runs/Dec28_18-44-20_MacBook-Pro-de-Max-2.local/events.out.tfevents.1703786843.MacBook-Pro-de-Max-2.local.34768.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90d4c26f538e46a898ce2b0b614a742811d2aa098be294e09a4d8ee16576d145
+ size 734
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 18.72,
+   "train_loss": 0.595783019065857,
+   "train_runtime": 1248.9487,
+   "train_samples_per_second": 23.796,
+   "train_steps_per_second": 0.176
+ }
trainer_state.json ADDED
@@ -0,0 +1,333 @@
+ {
+   "best_metric": 0.7951807228915663,
+   "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-finetuned-gardner-icm-max/checkpoint-11",
+   "epoch": 18.72340425531915,
+   "eval_steps": 500,
+   "global_step": 220,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.85, "learning_rate": 2.272727272727273e-05, "loss": 1.0925, "step": 10},
+     {"epoch": 0.94, "eval_accuracy": 0.7951807228915663, "eval_loss": 1.0630700588226318, "eval_runtime": 8.4566, "eval_samples_per_second": 19.63, "eval_steps_per_second": 0.71, "step": 11},
+     {"epoch": 1.7, "learning_rate": 4.545454545454546e-05, "loss": 0.9552, "step": 20},
+     {"epoch": 1.96, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.633553683757782, "eval_runtime": 2.9014, "eval_samples_per_second": 57.213, "eval_steps_per_second": 2.068, "step": 23},
+     {"epoch": 2.55, "learning_rate": 4.797979797979798e-05, "loss": 0.6566, "step": 30},
+     {"epoch": 2.98, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5356065630912781, "eval_runtime": 2.9308, "eval_samples_per_second": 56.639, "eval_steps_per_second": 2.047, "step": 35},
+     {"epoch": 3.4, "learning_rate": 4.545454545454546e-05, "loss": 0.5686, "step": 40},
+     {"epoch": 4.0, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5150089263916016, "eval_runtime": 3.1556, "eval_samples_per_second": 52.605, "eval_steps_per_second": 1.901, "step": 47},
+     {"epoch": 4.26, "learning_rate": 4.292929292929293e-05, "loss": 0.5703, "step": 50},
+     {"epoch": 4.94, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5129297971725464, "eval_runtime": 2.9359, "eval_samples_per_second": 56.542, "eval_steps_per_second": 2.044, "step": 58},
+     {"epoch": 5.11, "learning_rate": 4.0404040404040405e-05, "loss": 0.5361, "step": 60},
+     {"epoch": 5.96, "learning_rate": 3.787878787878788e-05, "loss": 0.5726, "step": 70},
+     {"epoch": 5.96, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5154421925544739, "eval_runtime": 2.9132, "eval_samples_per_second": 56.982, "eval_steps_per_second": 2.06, "step": 70},
+     {"epoch": 6.81, "learning_rate": 3.535353535353535e-05, "loss": 0.5482, "step": 80},
+     {"epoch": 6.98, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5141979455947876, "eval_runtime": 2.9168, "eval_samples_per_second": 56.912, "eval_steps_per_second": 2.057, "step": 82},
+     {"epoch": 7.66, "learning_rate": 3.282828282828283e-05, "loss": 0.568, "step": 90},
+     {"epoch": 8.0, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5108516812324524, "eval_runtime": 2.9147, "eval_samples_per_second": 56.952, "eval_steps_per_second": 2.059, "step": 94},
+     {"epoch": 8.51, "learning_rate": 3.0303030303030306e-05, "loss": 0.5245, "step": 100},
+     {"epoch": 8.94, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5134214162826538, "eval_runtime": 2.9007, "eval_samples_per_second": 57.227, "eval_steps_per_second": 2.068, "step": 105},
+     {"epoch": 9.36, "learning_rate": 2.777777777777778e-05, "loss": 0.5979, "step": 110},
+     {"epoch": 9.96, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5237765908241272, "eval_runtime": 2.9436, "eval_samples_per_second": 56.394, "eval_steps_per_second": 2.038, "step": 117},
+     {"epoch": 10.21, "learning_rate": 2.5252525252525256e-05, "loss": 0.5442, "step": 120},
+     {"epoch": 10.98, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.507612943649292, "eval_runtime": 2.9562, "eval_samples_per_second": 56.154, "eval_steps_per_second": 2.03, "step": 129},
+     {"epoch": 11.06, "learning_rate": 2.272727272727273e-05, "loss": 0.5451, "step": 130},
+     {"epoch": 11.91, "learning_rate": 2.0202020202020203e-05, "loss": 0.545, "step": 140},
+     {"epoch": 12.0, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5061953067779541, "eval_runtime": 2.9818, "eval_samples_per_second": 55.671, "eval_steps_per_second": 2.012, "step": 141},
+     {"epoch": 12.77, "learning_rate": 1.7676767676767676e-05, "loss": 0.5514, "step": 150},
+     {"epoch": 12.94, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5012802481651306, "eval_runtime": 3.0403, "eval_samples_per_second": 54.599, "eval_steps_per_second": 1.973, "step": 152},
+     {"epoch": 13.62, "learning_rate": 1.5151515151515153e-05, "loss": 0.5377, "step": 160},
+     {"epoch": 13.96, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5044662356376648, "eval_runtime": 3.0226, "eval_samples_per_second": 54.919, "eval_steps_per_second": 1.985, "step": 164},
+     {"epoch": 14.47, "learning_rate": 1.2626262626262628e-05, "loss": 0.5282, "step": 170},
+     {"epoch": 14.98, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.5037835836410522, "eval_runtime": 2.9367, "eval_samples_per_second": 56.525, "eval_steps_per_second": 2.043, "step": 176},
+     {"epoch": 15.32, "learning_rate": 1.0101010101010101e-05, "loss": 0.5389, "step": 180},
+     {"epoch": 16.0, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.4994117021560669, "eval_runtime": 2.9413, "eval_samples_per_second": 56.437, "eval_steps_per_second": 2.04, "step": 188},
+     {"epoch": 16.17, "learning_rate": 7.5757575757575764e-06, "loss": 0.5039, "step": 190},
+     {"epoch": 16.94, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.49963268637657166, "eval_runtime": 3.0102, "eval_samples_per_second": 55.146, "eval_steps_per_second": 1.993, "step": 199},
+     {"epoch": 17.02, "learning_rate": 5.050505050505051e-06, "loss": 0.5449, "step": 200},
+     {"epoch": 17.87, "learning_rate": 2.5252525252525253e-06, "loss": 0.5348, "step": 210},
+     {"epoch": 17.96, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.493960976600647, "eval_runtime": 2.9022, "eval_samples_per_second": 57.197, "eval_steps_per_second": 2.067, "step": 211},
+     {"epoch": 18.72, "learning_rate": 0.0, "loss": 0.5426, "step": 220},
+     {"epoch": 18.72, "eval_accuracy": 0.7951807228915663, "eval_loss": 0.49473580718040466, "eval_runtime": 2.8726, "eval_samples_per_second": 57.787, "eval_steps_per_second": 2.089, "step": 220},
+     {"epoch": 18.72, "step": 220, "total_flos": 9.056508197685166e+17, "train_loss": 0.595783019065857, "train_runtime": 1248.9487, "train_samples_per_second": 23.796, "train_steps_per_second": 0.176}
+   ],
+   "logging_steps": 10,
+   "max_steps": 220,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "total_flos": 9.056508197685166e+17,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
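Note the state records checkpoint-11 (eval accuracy 0.7952) as the best model, while the final evaluation in eval_results.json reports 0.6429. A small sketch for inspecting these fields (the local path is an assumption):

```python
import json

# Sketch: assumes trainer_state.json sits in the current directory.
with open("trainer_state.json") as f:
    state = json.load(f)
print(state["best_metric"])            # 0.7951807228915663
print(state["best_model_checkpoint"])  # ends in checkpoint-11
```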
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81e9c804b177a239e1066ec42d5a893c7bd6e29d0f1ad2294063020d5d452657
+ size 4792