ZhaoYoujia committed on
Commit
70f3a9c
1 Parent(s): 850215d

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,42 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - autotrain-vit-base-2/autotrain-data
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 1.019775390625
+
+ f1_macro: 0.6
+
+ f1_micro: 0.75
+
+ f1_weighted: 0.65
+
+ precision_macro: 0.5555555555555555
+
+ precision_micro: 0.75
+
+ precision_weighted: 0.5833333333333333
+
+ recall_macro: 0.6666666666666666
+
+ recall_micro: 0.75
+
+ recall_weighted: 0.75
+
+ accuracy: 0.75
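For anyone wanting to try the model, here is a minimal inference sketch (not part of the commit) using the transformers pipeline API. The repo id ZhaoYoujia/autotrain-vit-base-2 is taken from training_params.json below; the input filename is a placeholder.

```python
# Minimal sketch: image classification with the uploaded model.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="ZhaoYoujia/autotrain-vit-base-2",  # repo id from training_params.json
)

# "bed_scene.jpg" is a hypothetical local image; any path or URL works.
for pred in classifier("bed_scene.jpg"):
    print(pred["label"], round(pred["score"], 3))
```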
checkpoint-18/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 3,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "11covered_with_a_quilt_and_only_the_head_exposed",
+     "1": "12covered_with_a_quilt_and_exposed_other_parts_of_the_body",
+     "2": "13has_nothing_to_do_with_11_and_12_above"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "11covered_with_a_quilt_and_only_the_head_exposed": 0,
+     "12covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1,
+     "13has_nothing_to_do_with_11_and_12_above": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
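The three id2label entries are the quilt-coverage classes this model distinguishes. A quick way to inspect the mapping, a sketch assuming the published repo id from training_params.json:

```python
# Sketch: inspect the label mapping shipped with this config.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ZhaoYoujia/autotrain-vit-base-2")
print(config.id2label[0])  # 11covered_with_a_quilt_and_only_the_head_exposed
print(config.label2id["13has_nothing_to_do_with_11_and_12_above"])  # 2
```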
checkpoint-18/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24233972a8514bb0b6b5c162543c50d29e3276514bb8a405f94d5a265d6eec72
+ size 343227052
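The three lines above are not the weights themselves but a Git LFS pointer (version, oid, size); the Hub resolves it to the ~343 MB float32 safetensors blob on download. A purely illustrative sketch of parsing such a pointer:

```python
# Sketch: parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:24233972a8514bb0b6b5c162543c50d29e3276514bb8a405f94d5a265d6eec72\n"
    "size 343227052"
)
print(parse_lfs_pointer(pointer)["size"])  # 343227052 (bytes)
```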
checkpoint-18/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f978f751df2da7186260dcbd47e23457f4623112153ffb5659f5bfa4488e20d8
+ size 686575034
checkpoint-18/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cc23527a4dcb91e6dbe6ae59d6b6ebacff1c319ca563a153911b9489f92fd26
+ size 14180
checkpoint-18/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3f20bfc16e51685bd013a5fd8ab0cb541785bffa7ddc771a29458bf97bfc574
+ size 1064
checkpoint-18/trainer_state.json ADDED
@@ -0,0 +1,291 @@
+ {
+   "best_metric": 1.019775390625,
+   "best_model_checkpoint": "autotrain-vit-base-2/checkpoint-18",
+   "epoch": 9.0,
+   "eval_steps": 500,
+   "global_step": 18,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.5,
+       "learning_rate": 0.0,
+       "loss": 1.0986,
+       "step": 1
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 5e-05,
+       "loss": 1.0986,
+       "step": 2
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5,
+       "eval_f1_macro": 0.26666666666666666,
+       "eval_f1_micro": 0.5,
+       "eval_f1_weighted": 0.4,
+       "eval_loss": 1.0986328125,
+       "eval_precision_macro": 0.2222222222222222,
+       "eval_precision_micro": 0.5,
+       "eval_precision_weighted": 0.3333333333333333,
+       "eval_recall_macro": 0.3333333333333333,
+       "eval_recall_micro": 0.5,
+       "eval_recall_weighted": 0.5,
+       "eval_runtime": 0.1241,
+       "eval_samples_per_second": 32.227,
+       "eval_steps_per_second": 8.057,
+       "step": 2
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 0.0001,
+       "loss": 1.0986,
+       "step": 3
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 9.444444444444444e-05,
+       "loss": 1.0999,
+       "step": 4
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.5,
+       "eval_f1_macro": 0.2222222222222222,
+       "eval_f1_micro": 0.5,
+       "eval_f1_weighted": 0.3333333333333333,
+       "eval_loss": 1.086669921875,
+       "eval_precision_macro": 0.16666666666666666,
+       "eval_precision_micro": 0.5,
+       "eval_precision_weighted": 0.25,
+       "eval_recall_macro": 0.3333333333333333,
+       "eval_recall_micro": 0.5,
+       "eval_recall_weighted": 0.5,
+       "eval_runtime": 0.1558,
+       "eval_samples_per_second": 25.675,
+       "eval_steps_per_second": 6.419,
+       "step": 4
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 8.888888888888889e-05,
+       "loss": 1.0785,
+       "step": 5
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 8.333333333333334e-05,
+       "loss": 1.0745,
+       "step": 6
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.07421875,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1106,
+       "eval_samples_per_second": 36.166,
+       "eval_steps_per_second": 9.042,
+       "step": 6
+     },
+     {
+       "epoch": 3.5,
+       "learning_rate": 7.777777777777778e-05,
+       "loss": 1.0503,
+       "step": 7
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 7.222222222222222e-05,
+       "loss": 1.033,
+       "step": 8
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.061279296875,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.145,
+       "eval_samples_per_second": 27.586,
+       "eval_steps_per_second": 6.897,
+       "step": 8
+     },
+     {
+       "epoch": 4.5,
+       "learning_rate": 6.666666666666667e-05,
+       "loss": 1.0021,
+       "step": 9
+     },
+     {
+       "epoch": 5.0,
+       "learning_rate": 6.666666666666667e-05,
+       "loss": 1.0374,
+       "step": 10
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.05322265625,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1047,
+       "eval_samples_per_second": 38.192,
+       "eval_steps_per_second": 9.548,
+       "step": 10
+     },
+     {
+       "epoch": 5.5,
+       "learning_rate": 6.111111111111112e-05,
+       "loss": 1.0021,
+       "step": 11
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 5.555555555555556e-05,
+       "loss": 0.9912,
+       "step": 12
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.0400390625,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1544,
+       "eval_samples_per_second": 25.903,
+       "eval_steps_per_second": 6.476,
+       "step": 12
+     },
+     {
+       "epoch": 6.5,
+       "learning_rate": 5e-05,
+       "loss": 0.969,
+       "step": 13
+     },
+     {
+       "epoch": 7.0,
+       "learning_rate": 4.4444444444444447e-05,
+       "loss": 0.9684,
+       "step": 14
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.031494140625,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1058,
+       "eval_samples_per_second": 37.811,
+       "eval_steps_per_second": 9.453,
+       "step": 14
+     },
+     {
+       "epoch": 7.5,
+       "learning_rate": 3.888888888888889e-05,
+       "loss": 0.9493,
+       "step": 15
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.9297,
+       "step": 16
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.024658203125,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1145,
+       "eval_samples_per_second": 34.936,
+       "eval_steps_per_second": 8.734,
+       "step": 16
+     },
+     {
+       "epoch": 8.5,
+       "learning_rate": 2.777777777777778e-05,
+       "loss": 0.9229,
+       "step": 17
+     },
+     {
+       "epoch": 9.0,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 0.9038,
+       "step": 18
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.75,
+       "eval_f1_macro": 0.6,
+       "eval_f1_micro": 0.75,
+       "eval_f1_weighted": 0.65,
+       "eval_loss": 1.019775390625,
+       "eval_precision_macro": 0.5555555555555555,
+       "eval_precision_micro": 0.75,
+       "eval_precision_weighted": 0.5833333333333333,
+       "eval_recall_macro": 0.6666666666666666,
+       "eval_recall_micro": 0.75,
+       "eval_recall_weighted": 0.75,
+       "eval_runtime": 0.1741,
+       "eval_samples_per_second": 22.98,
+       "eval_steps_per_second": 5.745,
+       "step": 18
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 20,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 8369209888284672.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
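The log above shows eval loss improving from 1.0986 to 1.0198 while accuracy plateaus at 0.75 from epoch 3 onward. A sketch (assuming a local clone of the repo) that extracts just the per-epoch evaluation records:

```python
# Sketch: print the evaluation curve from trainer_state.json.
import json

with open("checkpoint-18/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # eval records carry eval_* keys; train steps do not
        print(f"epoch {entry['epoch']:>4}: "
              f"eval_loss={entry['eval_loss']:.4f} "
              f"accuracy={entry['eval_accuracy']:.2f}")
```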
checkpoint-18/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:845903e28a5cefa036825ea55ac382dad13e5d9b0e38e31e71cdbd7e7f9dc075
+ size 4664
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 3,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "11covered_with_a_quilt_and_only_the_head_exposed",
+     "1": "12covered_with_a_quilt_and_exposed_other_parts_of_the_body",
+     "2": "13has_nothing_to_do_with_11_and_12_above"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "11covered_with_a_quilt_and_only_the_head_exposed": 0,
+     "12covered_with_a_quilt_and_exposed_other_parts_of_the_body": 1,
+     "13has_nothing_to_do_with_11_and_12_above": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24233972a8514bb0b6b5c162543c50d29e3276514bb8a405f94d5a265d6eec72
+ size 343227052
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
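In plain terms: every image is resized to 224x224 with PIL's bilinear filter (resample 2), rescaled by 1/255 (0.00392156862745098), and normalized per channel with mean = std = 0.5. A sketch of applying this preprocessing directly (the image path is a placeholder):

```python
# Sketch: reproduce the preprocessing this config specifies.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("ZhaoYoujia/autotrain-vit-base-2")
image = Image.open("bed_scene.jpg").convert("RGB")  # hypothetical file
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```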
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:845903e28a5cefa036825ea55ac382dad13e5d9b0e38e31e71cdbd7e7f9dc075
+ size 4664
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "autotrain-vit-base-2/autotrain-data",
+   "model": "google/vit-base-patch16-224",
+   "username": "ZhaoYoujia",
+   "lr": 0.0001,
+   "epochs": 10,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "autotrain-vit-base-2",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "ZhaoYoujia/autotrain-vit-base-2",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
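These settings are internally consistent with trainer_state.json above: max_steps 20 with warmup_ratio 0.1 gives 2 warmup steps, so the learning rate climbs 0.0 -> 5e-05 -> 1e-04 over steps 1-3 and then decays linearly by 1e-04/18 (about 5.6e-06) per step toward 0 at step 20. A sketch reproducing the schedule with transformers' scheduler helper:

```python
# Sketch: rebuild the lr schedule implied by lr=1e-4, max_steps=20, warmup_ratio=0.1.
import torch
from transformers import get_linear_schedule_with_warmup

optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=20  # 0.1 * 20 = 2
)
for step in range(1, 5):
    print(step, optimizer.param_groups[0]["lr"])  # 0.0, 5e-05, 0.0001, 9.44e-05
    scheduler.step()
```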