moooji committed
Commit 49933bb
1 Parent(s): 36dead5

End of training

Files changed (5)
  1. README.md +7 -5
  2. all_results.json +12 -0
  3. eval_results.json +8 -0
  4. train_results.json +7 -0
  5. trainer_state.json +253 -0
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-large-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: touchtech/fashion-images-perspectives
       type: imagefolder
       config: default
       split: train
@@ -22,7 +24,7 @@ model-index:
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.936190032603633
+      value: 0.9315323707498836
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-perspectives-vit-large-patch16-224-in21k
 
-This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-perspectives dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2562
-- Accuracy: 0.9362
+- Loss: 0.2543
+- Accuracy: 0.9315
 
 ## Model description
 
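For anyone wanting to try the fine-tuned checkpoint described in the card above, a minimal inference sketch follows. The repo id is an assumption inferred from the model card title (the commit does not state where the checkpoint is hosted), so adjust it to the actual Hub path.

```python
from PIL import Image
from transformers import pipeline

# Hypothetical repo id inferred from the model card title -- adjust to the real Hub location.
classifier = pipeline(
    "image-classification",
    model="touchtech/fashion-images-perspectives-vit-large-patch16-224-in21k",
)

image = Image.open("example_product_photo.jpg")  # any local fashion image
for prediction in classifier(image, top_k=3):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```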
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9315323707498836,
+    "eval_loss": 0.2543001174926758,
+    "eval_runtime": 329.8994,
+    "eval_samples_per_second": 13.016,
+    "eval_steps_per_second": 1.628,
+    "train_loss": 0.3327771868382842,
+    "train_runtime": 12171.5283,
+    "train_samples_per_second": 9.995,
+    "train_steps_per_second": 1.25
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9315323707498836,
+    "eval_loss": 0.2543001174926758,
+    "eval_runtime": 329.8994,
+    "eval_samples_per_second": 13.016,
+    "eval_steps_per_second": 1.628
+}
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.3327771868382842,
+    "train_runtime": 12171.5283,
+    "train_samples_per_second": 9.995,
+    "train_steps_per_second": 1.25
+}
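As a quick sanity check, the throughput figures in train_results.json line up with the step count recorded in trainer_state.json below. A minimal sketch, assuming the JSON file has been downloaded to the working directory:

```python
import json

# Rough cross-check of the reported training throughput against the step count.
with open("train_results.json") as f:
    train = json.load(f)

epochs = train["epoch"]  # 5.0
total_samples = train["train_samples_per_second"] * train["train_runtime"]
total_steps = train["train_steps_per_second"] * train["train_runtime"]

print(f"~{total_samples / epochs:,.0f} training samples per epoch")  # ~24,331
print(f"~{total_steps:,.0f} optimizer steps in total")               # ~15,214 (trainer_state reports 15,210)
```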
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+{
+  "best_metric": 0.2543001174926758,
+  "best_model_checkpoint": "/workspace/training_output/perspectives-vit-large-patch16-224-in21k/checkpoint-12168",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 15210,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.16,
+      "learning_rate": 1.9342537804076265e-05,
+      "loss": 0.9137,
+      "step": 500
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 1.8685075608152533e-05,
+      "loss": 0.5533,
+      "step": 1000
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 1.80276134122288e-05,
+      "loss": 0.5205,
+      "step": 1500
+    },
+    {
+      "epoch": 0.66,
+      "learning_rate": 1.7370151216305063e-05,
+      "loss": 0.4745,
+      "step": 2000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 1.671268902038133e-05,
+      "loss": 0.4605,
+      "step": 2500
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 1.6055226824457594e-05,
+      "loss": 0.4164,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9024219841639497,
+      "eval_loss": 0.28684180974960327,
+      "eval_runtime": 335.4974,
+      "eval_samples_per_second": 12.799,
+      "eval_steps_per_second": 1.601,
+      "step": 3042
+    },
+    {
+      "epoch": 1.15,
+      "learning_rate": 1.539776462853386e-05,
+      "loss": 0.3762,
+      "step": 3500
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 1.4740302432610125e-05,
+      "loss": 0.3825,
+      "step": 4000
+    },
+    {
+      "epoch": 1.48,
+      "learning_rate": 1.4082840236686392e-05,
+      "loss": 0.3543,
+      "step": 4500
+    },
+    {
+      "epoch": 1.64,
+      "learning_rate": 1.3425378040762658e-05,
+      "loss": 0.3748,
+      "step": 5000
+    },
+    {
+      "epoch": 1.81,
+      "learning_rate": 1.2767915844838923e-05,
+      "loss": 0.3462,
+      "step": 5500
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 1.2110453648915189e-05,
+      "loss": 0.3391,
+      "step": 6000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.904052165812762,
+      "eval_loss": 0.30549806356430054,
+      "eval_runtime": 334.7146,
+      "eval_samples_per_second": 12.829,
+      "eval_steps_per_second": 1.604,
+      "step": 6084
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 1.1452991452991454e-05,
+      "loss": 0.3078,
+      "step": 6500
+    },
+    {
+      "epoch": 2.3,
+      "learning_rate": 1.0795529257067721e-05,
+      "loss": 0.2976,
+      "step": 7000
+    },
+    {
+      "epoch": 2.47,
+      "learning_rate": 1.0138067061143987e-05,
+      "loss": 0.3078,
+      "step": 7500
+    },
+    {
+      "epoch": 2.63,
+      "learning_rate": 9.48060486522025e-06,
+      "loss": 0.2773,
+      "step": 8000
+    },
+    {
+      "epoch": 2.79,
+      "learning_rate": 8.823142669296516e-06,
+      "loss": 0.2851,
+      "step": 8500
+    },
+    {
+      "epoch": 2.96,
+      "learning_rate": 8.165680473372781e-06,
+      "loss": 0.2836,
+      "step": 9000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9180251513740102,
+      "eval_loss": 0.30707448720932007,
+      "eval_runtime": 337.2814,
+      "eval_samples_per_second": 12.731,
+      "eval_steps_per_second": 1.592,
+      "step": 9126
+    },
+    {
+      "epoch": 3.12,
+      "learning_rate": 7.508218277449047e-06,
+      "loss": 0.2507,
+      "step": 9500
+    },
+    {
+      "epoch": 3.29,
+      "learning_rate": 6.850756081525313e-06,
+      "loss": 0.2584,
+      "step": 10000
+    },
+    {
+      "epoch": 3.45,
+      "learning_rate": 6.193293885601579e-06,
+      "loss": 0.27,
+      "step": 10500
+    },
+    {
+      "epoch": 3.62,
+      "learning_rate": 5.535831689677844e-06,
+      "loss": 0.2792,
+      "step": 11000
+    },
+    {
+      "epoch": 3.78,
+      "learning_rate": 4.878369493754109e-06,
+      "loss": 0.2335,
+      "step": 11500
+    },
+    {
+      "epoch": 3.94,
+      "learning_rate": 4.220907297830375e-06,
+      "loss": 0.2292,
+      "step": 12000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9315323707498836,
+      "eval_loss": 0.2543001174926758,
+      "eval_runtime": 337.9988,
+      "eval_samples_per_second": 12.704,
+      "eval_steps_per_second": 1.589,
+      "step": 12168
+    },
+    {
+      "epoch": 4.11,
+      "learning_rate": 3.563445101906641e-06,
+      "loss": 0.2234,
+      "step": 12500
+    },
+    {
+      "epoch": 4.27,
+      "learning_rate": 2.9059829059829063e-06,
+      "loss": 0.2069,
+      "step": 13000
+    },
+    {
+      "epoch": 4.44,
+      "learning_rate": 2.2485207100591717e-06,
+      "loss": 0.2118,
+      "step": 13500
+    },
+    {
+      "epoch": 4.6,
+      "learning_rate": 1.5910585141354374e-06,
+      "loss": 0.2041,
+      "step": 14000
+    },
+    {
+      "epoch": 4.77,
+      "learning_rate": 9.335963182117029e-07,
+      "loss": 0.2159,
+      "step": 14500
+    },
+    {
+      "epoch": 4.93,
+      "learning_rate": 2.7613412228796843e-07,
+      "loss": 0.1842,
+      "step": 15000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.936190032603633,
+      "eval_loss": 0.256198912858963,
+      "eval_runtime": 350.5885,
+      "eval_samples_per_second": 12.248,
+      "eval_steps_per_second": 1.532,
+      "step": 15210
+    },
+    {
+      "epoch": 5.0,
+      "step": 15210,
+      "total_flos": 3.332544438628823e+19,
+      "train_loss": 0.3327771868382842,
+      "train_runtime": 12171.5283,
+      "train_samples_per_second": 9.995,
+      "train_steps_per_second": 1.25
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 15210,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 3.332544438628823e+19,
+  "trial_name": null,
+  "trial_params": null
+}
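The log_history above holds one evaluation entry per epoch plus a final training summary; the best checkpoint (step 12168, epoch 4) was apparently selected on eval_loss, which is why the model card reports 0.9315 accuracy even though the epoch-5 evaluation scored 0.9362. A minimal sketch for summarizing the file, assuming it has been downloaded locally:

```python
import json

# Print the best checkpoint and the per-epoch evaluation metrics from trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']} (eval_loss={state['best_metric']:.4f})")
for entry in state["log_history"]:
    if "eval_accuracy" in entry:  # evaluation entries only; training logs carry 'loss' instead
        print(f"epoch {entry['epoch']:.0f}: accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")
```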