moooji committed on
Commit dd2cbb2
1 Parent(s): c45eef0

End of training

Files changed (5)
  1. README.md +5 -3
  2. all_results.json +12 -0
  3. eval_results.json +8 -0
  4. train_results.json +7 -0
  5. trainer_state.json +253 -0
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-huge-patch14-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: touchtech/fashion-images-perspectives
       type: imagefolder
       config: default
       split: train
@@ -30,9 +32,9 @@ should probably proofread and complete it, then remove this comment. -->

 # fashion-images-perspectives-vit-huge-patch14-224-in21k

-This model is a fine-tuned version of [google/vit-huge-patch14-224-in21k](https://huggingface.co/google/vit-huge-patch14-224-in21k) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-huge-patch14-224-in21k](https://huggingface.co/google/vit-huge-patch14-224-in21k) on the touchtech/fashion-images-perspectives dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2710
+- Loss: 0.2604
 - Accuracy: 0.9150

 ## Model description
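Note on the card update: the evaluation loss reported above (0.2604) appears to correspond to the best checkpoint recorded at step 12168 in trainer_state.json below, rather than the final epoch-5 evaluation (0.2710). For convenience, here is a minimal inference sketch for the fine-tuned classifier; the repo id used below is an assumption (the commit does not state where the checkpoint is published), so substitute the actual model id.

```python
# Minimal inference sketch for the fine-tuned ViT classifier described in the card above.
# ASSUMPTION: the repo id below is hypothetical; replace it with the actual published model id.
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_id = "touchtech/fashion-images-perspectives-vit-huge-patch14-224-in21k"  # hypothetical

processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)
model.eval()

image = Image.open("example.jpg").convert("RGB")   # any product photo
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # predicted perspective label
```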
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+  "epoch": 5.0,
+  "eval_accuracy": 0.9149976711690732,
+  "eval_loss": 0.26038262248039246,
+  "eval_runtime": 354.8337,
+  "eval_samples_per_second": 12.101,
+  "eval_steps_per_second": 1.513,
+  "train_loss": 0.4780347077960955,
+  "train_runtime": 18614.0668,
+  "train_samples_per_second": 6.535,
+  "train_steps_per_second": 0.817
+}
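As a quick consistency check (no new data, only the aggregates above): throughput multiplied by runtime reproduces the 15210 optimizer steps recorded in trainer_state.json below.

```python
# Sanity check on the aggregate numbers in all_results.json (values copied from above).
train_runtime = 18614.0668        # seconds
steps_per_second = 0.817
samples_per_second = 6.535

print(round(train_runtime * steps_per_second))    # ~15208, consistent with global_step = 15210
print(round(train_runtime * samples_per_second))  # ~121643 samples seen across the 5 epochs
```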
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 5.0,
+  "eval_accuracy": 0.9149976711690732,
+  "eval_loss": 0.26038262248039246,
+  "eval_runtime": 354.8337,
+  "eval_samples_per_second": 12.101,
+  "eval_steps_per_second": 1.513
+}
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+  "epoch": 5.0,
+  "train_loss": 0.4780347077960955,
+  "train_runtime": 18614.0668,
+  "train_samples_per_second": 6.535,
+  "train_steps_per_second": 0.817
+}
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+{
+  "best_metric": 0.26038262248039246,
+  "best_model_checkpoint": "/workspace/training_output/perspectives-vit-huge-patch14-224-in21k/checkpoint-12168",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 15210,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.16,
+      "learning_rate": 1.9342537804076265e-05,
+      "loss": 1.561,
+      "step": 500
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 1.8685075608152533e-05,
+      "loss": 1.008,
+      "step": 1000
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 1.80276134122288e-05,
+      "loss": 0.8167,
+      "step": 1500
+    },
+    {
+      "epoch": 0.66,
+      "learning_rate": 1.7370151216305063e-05,
+      "loss": 0.717,
+      "step": 2000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 1.671268902038133e-05,
+      "loss": 0.6546,
+      "step": 2500
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 1.6055226824457594e-05,
+      "loss": 0.5932,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.8889147647880764,
+      "eval_loss": 0.3862580955028534,
+      "eval_runtime": 426.1609,
+      "eval_samples_per_second": 10.076,
+      "eval_steps_per_second": 1.26,
+      "step": 3042
+    },
+    {
+      "epoch": 1.15,
+      "learning_rate": 1.539776462853386e-05,
+      "loss": 0.5426,
+      "step": 3500
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 1.4740302432610125e-05,
+      "loss": 0.5235,
+      "step": 4000
+    },
+    {
+      "epoch": 1.48,
+      "learning_rate": 1.4082840236686392e-05,
+      "loss": 0.5008,
+      "step": 4500
+    },
+    {
+      "epoch": 1.64,
+      "learning_rate": 1.3425378040762658e-05,
+      "loss": 0.4795,
+      "step": 5000
+    },
+    {
+      "epoch": 1.81,
+      "learning_rate": 1.2767915844838923e-05,
+      "loss": 0.4353,
+      "step": 5500
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 1.2110453648915189e-05,
+      "loss": 0.4286,
+      "step": 6000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8642291569632045,
+      "eval_loss": 0.4201112985610962,
+      "eval_runtime": 405.9091,
+      "eval_samples_per_second": 10.579,
+      "eval_steps_per_second": 1.323,
+      "step": 6084
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 1.1452991452991454e-05,
+      "loss": 0.4019,
+      "step": 6500
+    },
+    {
+      "epoch": 2.3,
+      "learning_rate": 1.0795529257067721e-05,
+      "loss": 0.4,
+      "step": 7000
+    },
+    {
+      "epoch": 2.47,
+      "learning_rate": 1.0138067061143987e-05,
+      "loss": 0.3997,
+      "step": 7500
+    },
+    {
+      "epoch": 2.63,
+      "learning_rate": 9.48060486522025e-06,
+      "loss": 0.3644,
+      "step": 8000
+    },
+    {
+      "epoch": 2.79,
+      "learning_rate": 8.823142669296516e-06,
+      "loss": 0.3684,
+      "step": 8500
+    },
+    {
+      "epoch": 2.96,
+      "learning_rate": 8.165680473372781e-06,
+      "loss": 0.3628,
+      "step": 9000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9101071262226362,
+      "eval_loss": 0.28201931715011597,
+      "eval_runtime": 434.5616,
+      "eval_samples_per_second": 9.881,
+      "eval_steps_per_second": 1.236,
+      "step": 9126
+    },
+    {
+      "epoch": 3.12,
+      "learning_rate": 7.508218277449047e-06,
+      "loss": 0.3414,
+      "step": 9500
+    },
+    {
+      "epoch": 3.29,
+      "learning_rate": 6.850756081525313e-06,
+      "loss": 0.3458,
+      "step": 10000
+    },
+    {
+      "epoch": 3.45,
+      "learning_rate": 6.193293885601579e-06,
+      "loss": 0.3484,
+      "step": 10500
+    },
+    {
+      "epoch": 3.62,
+      "learning_rate": 5.535831689677844e-06,
+      "loss": 0.3679,
+      "step": 11000
+    },
+    {
+      "epoch": 3.78,
+      "learning_rate": 4.878369493754109e-06,
+      "loss": 0.328,
+      "step": 11500
+    },
+    {
+      "epoch": 3.94,
+      "learning_rate": 4.220907297830375e-06,
+      "loss": 0.3183,
+      "step": 12000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9149976711690732,
+      "eval_loss": 0.26038262248039246,
+      "eval_runtime": 392.7229,
+      "eval_samples_per_second": 10.934,
+      "eval_steps_per_second": 1.367,
+      "step": 12168
+    },
+    {
+      "epoch": 4.11,
+      "learning_rate": 3.563445101906641e-06,
+      "loss": 0.3123,
+      "step": 12500
+    },
+    {
+      "epoch": 4.27,
+      "learning_rate": 2.9059829059829063e-06,
+      "loss": 0.309,
+      "step": 13000
+    },
+    {
+      "epoch": 4.44,
+      "learning_rate": 2.2485207100591717e-06,
+      "loss": 0.2992,
+      "step": 13500
+    },
+    {
+      "epoch": 4.6,
+      "learning_rate": 1.5910585141354374e-06,
+      "loss": 0.3119,
+      "step": 14000
+    },
+    {
+      "epoch": 4.77,
+      "learning_rate": 9.335963182117029e-07,
+      "loss": 0.3192,
+      "step": 14500
+    },
+    {
+      "epoch": 4.93,
+      "learning_rate": 2.7613412228796843e-07,
+      "loss": 0.2648,
+      "step": 15000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9149976711690732,
+      "eval_loss": 0.27095553278923035,
+      "eval_runtime": 390.7206,
+      "eval_samples_per_second": 10.99,
+      "eval_steps_per_second": 1.374,
+      "step": 15210
+    },
+    {
+      "epoch": 5.0,
+      "step": 15210,
+      "total_flos": 6.9304343223654605e+19,
+      "train_loss": 0.4780347077960955,
+      "train_runtime": 18614.0668,
+      "train_samples_per_second": 6.535,
+      "train_steps_per_second": 0.817
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 15210,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 6.9304343223654605e+19,
+  "trial_name": null,
+  "trial_params": null
+}
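The training log above is easier to digest programmatically. A small sketch, assuming trainer_state.json has been downloaded locally, that separates the per-epoch evaluation entries from the step-wise training logs and prints the best checkpoint:

```python
# Summarize the trainer_state.json shown above.
# ASSUMPTION: the file has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes step-wise training logs with per-epoch evaluation entries;
# the eval entries are the ones carrying "eval_loss".
evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in evals:
    print(f'epoch {entry["epoch"]:.1f}  step {entry["step"]:>5}  '
          f'eval_loss {entry["eval_loss"]:.4f}  eval_accuracy {entry["eval_accuracy"]:.4f}')

print("best_metric:", state["best_metric"])                      # 0.2604 (epoch 4)
print("best_model_checkpoint:", state["best_model_checkpoint"])  # .../checkpoint-12168
```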