moooji committed
Commit
53bf1ac
1 Parent(s): 7a8feb8

End of training

Files changed (5)
  1. README.md +7 -5
  2. all_results.json +12 -0
  3. eval_results.json +8 -0
  4. train_results.json +7 -0
  5. trainer_state.json +253 -0
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
       dataset:
-        name: imagefolder
+        name: touchtech/fashion-images-perspectives
         type: imagefolder
         config: default
         split: train
@@ -22,7 +24,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.9229156963204471
+      value: 0.9268747088961341
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-perspectives
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the touchtech/fashion-images-perspectives dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2583
-- Accuracy: 0.9229
+- Loss: 0.2280
+- Accuracy: 0.9269
 
 ## Model description
 
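For convenience, a minimal inference sketch in Python. The hub repo id `touchtech/fashion-images-perspectives` is an assumption based on the dataset name in this commit; substitute the model's actual path:

```python
from transformers import pipeline

# Assumed repo id -- not confirmed by this commit; substitute the real one.
classifier = pipeline(
    "image-classification",
    model="touchtech/fashion-images-perspectives",
)

# Accepts a local path, URL, or PIL image; returns labels with scores.
print(classifier("example-garment.jpg"))
```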
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9268747088961341,
+    "eval_loss": 0.22801454365253448,
+    "eval_runtime": 317.7864,
+    "eval_samples_per_second": 13.512,
+    "eval_steps_per_second": 1.69,
+    "train_loss": 0.4568727389203334,
+    "train_runtime": 10636.8544,
+    "train_samples_per_second": 11.437,
+    "train_steps_per_second": 1.43
+}
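A quick sanity check on the throughput figures above. The per-device batch size of 8 is an inference from these numbers, not a value recorded in this commit:

```python
# train_steps_per_second and train_samples_per_second are mutually consistent
# with a batch size of 8 (assumed -- not recorded in this commit).
train_runtime = 10636.8544   # seconds, from all_results.json
global_steps = 15210         # from trainer_state.json
assumed_batch_size = 8

print(round(global_steps / train_runtime, 2))                       # 1.43
print(round(global_steps * assumed_batch_size / train_runtime, 2))  # 11.44
# 11.44 vs. the reported 11.437: the small gap is likely the final
# partial batch of each epoch.
```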
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9268747088961341,
+    "eval_loss": 0.22801454365253448,
+    "eval_runtime": 317.7864,
+    "eval_samples_per_second": 13.512,
+    "eval_steps_per_second": 1.69
+}
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.4568727389203334,
+    "train_runtime": 10636.8544,
+    "train_samples_per_second": 11.437,
+    "train_steps_per_second": 1.43
+}
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+{
+  "best_metric": 0.22801454365253448,
+  "best_model_checkpoint": "/workspace/training_output/perspectives/checkpoint-12168",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 15210,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.16,
+      "learning_rate": 1.9342537804076265e-05,
+      "loss": 1.492,
+      "step": 500
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 1.8685075608152533e-05,
+      "loss": 0.9689,
+      "step": 1000
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 1.80276134122288e-05,
+      "loss": 0.795,
+      "step": 1500
+    },
+    {
+      "epoch": 0.66,
+      "learning_rate": 1.7370151216305063e-05,
+      "loss": 0.6899,
+      "step": 2000
+    },
+    {
+      "epoch": 0.82,
+      "learning_rate": 1.671268902038133e-05,
+      "loss": 0.6365,
+      "step": 2500
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 1.6055226824457594e-05,
+      "loss": 0.5677,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.883791336748952,
+      "eval_loss": 0.39963653683662415,
+      "eval_runtime": 331.6914,
+      "eval_samples_per_second": 12.946,
+      "eval_steps_per_second": 1.619,
+      "step": 3042
+    },
+    {
+      "epoch": 1.15,
+      "learning_rate": 1.539776462853386e-05,
+      "loss": 0.53,
+      "step": 3500
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 1.4740302432610125e-05,
+      "loss": 0.4905,
+      "step": 4000
+    },
+    {
+      "epoch": 1.48,
+      "learning_rate": 1.4082840236686392e-05,
+      "loss": 0.4689,
+      "step": 4500
+    },
+    {
+      "epoch": 1.64,
+      "learning_rate": 1.3425378040762658e-05,
+      "loss": 0.4579,
+      "step": 5000
+    },
+    {
+      "epoch": 1.81,
+      "learning_rate": 1.2767915844838923e-05,
+      "loss": 0.4237,
+      "step": 5500
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 1.2110453648915189e-05,
+      "loss": 0.4259,
+      "step": 6000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8747088961341407,
+      "eval_loss": 0.3984226882457733,
+      "eval_runtime": 367.0798,
+      "eval_samples_per_second": 11.698,
+      "eval_steps_per_second": 1.463,
+      "step": 6084
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 1.1452991452991454e-05,
+      "loss": 0.3722,
+      "step": 6500
+    },
+    {
+      "epoch": 2.3,
+      "learning_rate": 1.0795529257067721e-05,
+      "loss": 0.3872,
+      "step": 7000
+    },
+    {
+      "epoch": 2.47,
+      "learning_rate": 1.0138067061143987e-05,
+      "loss": 0.3768,
+      "step": 7500
+    },
+    {
+      "epoch": 2.63,
+      "learning_rate": 9.48060486522025e-06,
+      "loss": 0.3478,
+      "step": 8000
+    },
+    {
+      "epoch": 2.79,
+      "learning_rate": 8.823142669296516e-06,
+      "loss": 0.3586,
+      "step": 8500
+    },
+    {
+      "epoch": 2.96,
+      "learning_rate": 8.165680473372781e-06,
+      "loss": 0.3448,
+      "step": 9000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9189566837447601,
+      "eval_loss": 0.2590956389904022,
+      "eval_runtime": 345.333,
+      "eval_samples_per_second": 12.434,
+      "eval_steps_per_second": 1.555,
+      "step": 9126
+    },
+    {
+      "epoch": 3.12,
+      "learning_rate": 7.508218277449047e-06,
+      "loss": 0.3242,
+      "step": 9500
+    },
+    {
+      "epoch": 3.29,
+      "learning_rate": 6.850756081525313e-06,
+      "loss": 0.3168,
+      "step": 10000
+    },
+    {
+      "epoch": 3.45,
+      "learning_rate": 6.193293885601579e-06,
+      "loss": 0.3266,
+      "step": 10500
+    },
+    {
+      "epoch": 3.62,
+      "learning_rate": 5.535831689677844e-06,
+      "loss": 0.3492,
+      "step": 11000
+    },
+    {
+      "epoch": 3.78,
+      "learning_rate": 4.878369493754109e-06,
+      "loss": 0.3307,
+      "step": 11500
+    },
+    {
+      "epoch": 3.94,
+      "learning_rate": 4.220907297830375e-06,
+      "loss": 0.3094,
+      "step": 12000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9268747088961341,
+      "eval_loss": 0.22801454365253448,
+      "eval_runtime": 333.8049,
+      "eval_samples_per_second": 12.864,
+      "eval_steps_per_second": 1.609,
+      "step": 12168
+    },
+    {
+      "epoch": 4.11,
+      "learning_rate": 3.563445101906641e-06,
+      "loss": 0.2925,
+      "step": 12500
+    },
+    {
+      "epoch": 4.27,
+      "learning_rate": 2.9059829059829063e-06,
+      "loss": 0.2796,
+      "step": 13000
+    },
+    {
+      "epoch": 4.44,
+      "learning_rate": 2.2485207100591717e-06,
+      "loss": 0.2927,
+      "step": 13500
+    },
+    {
+      "epoch": 4.6,
+      "learning_rate": 1.5910585141354374e-06,
+      "loss": 0.29,
+      "step": 14000
+    },
+    {
+      "epoch": 4.77,
+      "learning_rate": 9.335963182117029e-07,
+      "loss": 0.3004,
+      "step": 14500
+    },
+    {
+      "epoch": 4.93,
+      "learning_rate": 2.7613412228796843e-07,
+      "loss": 0.2449,
+      "step": 15000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9229156963204471,
+      "eval_loss": 0.2583230435848236,
+      "eval_runtime": 324.7812,
+      "eval_samples_per_second": 13.221,
+      "eval_steps_per_second": 1.653,
+      "step": 15210
+    },
+    {
+      "epoch": 5.0,
+      "step": 15210,
+      "total_flos": 9.427914420548198e+18,
+      "train_loss": 0.4568727389203334,
+      "train_runtime": 10636.8544,
+      "train_samples_per_second": 11.437,
+      "train_steps_per_second": 1.43
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 15210,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 9.427914420548198e+18,
+  "trial_name": null,
+  "trial_params": null
+}
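The logged schedule is consistent with a linear decay from an initial learning rate of 2e-5 over all 15210 steps (the 1.9342e-05 at step 500 is exactly 2e-5 × (15210 − 500)/15210). A sketch of Trainer arguments inferred from trainer_state.json; the values marked as assumptions are not recorded in this commit:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="/workspace/training_output/perspectives",
    num_train_epochs=5,
    learning_rate=2e-5,                 # implied by the linear decay in log_history
    per_device_train_batch_size=8,      # assumed: consistent with the throughput figures
    per_device_eval_batch_size=8,       # assumed
    evaluation_strategy="epoch",        # eval entries land on epoch boundaries (3042, 6084, ...)
    save_strategy="epoch",              # assumed: best_model_checkpoint sits on an epoch boundary
    logging_steps=500,                  # matches the 500-step training log entries
    load_best_model_at_end=True,        # checkpoint-12168 (epoch 4) was kept as best
    metric_for_best_model="eval_loss",  # assumed: epoch 4 has the lowest eval_loss
    greater_is_better=False,
)
```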