moooji committed
Commit 2cb1342
1 Parent(s): c9fc1d1

End of training

Files changed (5)
  1. README.md +4 -2
  2. all_results.json +12 -0
  3. eval_results.json +8 -0
  4. train_results.json +7 -0
  5. trainer_state.json +253 -0
README.md CHANGED
@@ -2,6 +2,8 @@
  license: apache-2.0
  base_model: google/vit-large-patch16-224-in21k
  tags:
+ - image-classification
+ - vision
  - generated_from_trainer
  datasets:
  - imagefolder
@@ -14,7 +16,7 @@ model-index:
  name: Image Classification
  type: image-classification
  dataset:
- name: imagefolder
+ name: touchtech/fashion-images-perspectives
  type: imagefolder
  config: default
  split: train
@@ -30,7 +32,7 @@ should probably proofread and complete it, then remove this comment. -->

  # fashion-images-perspectives-vit-large-patch16-224-in21k-v4

- This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the imagefolder dataset.
+ This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-perspectives dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.2203
  - Accuracy: 0.9434
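
With the new `image-classification` and `vision` tags, the checkpoint can be used directly with the standard inference pipeline. A minimal usage sketch, assuming the model is published under the committer's namespace as `moooji/fashion-images-perspectives-vit-large-patch16-224-in21k-v4` (the repo id is an assumption, not something this commit states):

```python
from transformers import pipeline
from PIL import Image

# Repo id below is assumed from the model name and committer; adjust as needed.
classifier = pipeline(
    "image-classification",
    model="moooji/fashion-images-perspectives-vit-large-patch16-224-in21k-v4",
)

image = Image.open("product_photo.jpg")  # any RGB fashion/product image
print(classifier(image, top_k=3))        # [{'label': ..., 'score': ...}, ...]
```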
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.9434482758620689,
+   "eval_loss": 0.22029058635234833,
+   "eval_runtime": 314.6406,
+   "eval_samples_per_second": 13.825,
+   "eval_steps_per_second": 1.729,
+   "train_loss": 0.33228889720388066,
+   "train_runtime": 12005.4433,
+   "train_samples_per_second": 10.264,
+   "train_steps_per_second": 1.283
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 5.0,
+   "eval_accuracy": 0.9434482758620689,
+   "eval_loss": 0.22029058635234833,
+   "eval_runtime": 314.6406,
+   "eval_samples_per_second": 13.825,
+   "eval_steps_per_second": 1.729
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 5.0,
+   "train_loss": 0.33228889720388066,
+   "train_runtime": 12005.4433,
+   "train_samples_per_second": 10.264,
+   "train_steps_per_second": 1.283
+ }
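
`train_results.json` and `eval_results.json` are the per-split metric dumps typically written via `Trainer.save_metrics()`, and `all_results.json` is their union. A small, self-contained sketch (assuming the two JSON files are read from this repository) that cross-checks the reported throughput against the 15405 optimizer steps recorded in `trainer_state.json` below:

```python
import json

with open("train_results.json") as f:
    train = json.load(f)
with open("eval_results.json") as f:
    eval_metrics = json.load(f)

# steps/sec * runtime should roughly reproduce the 15405 optimizer steps
# (small drift comes from the rounded steps_per_second value).
approx_steps = train["train_steps_per_second"] * train["train_runtime"]
print(f"~{approx_steps:.0f} steps over {train['epoch']:.0f} epochs")
print(f"final eval accuracy: {eval_metrics['eval_accuracy']:.4f}")
```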
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+ {
+   "best_metric": 0.22029058635234833,
+   "best_model_checkpoint": "/workspace/training_output/perspectives-vit-large-patch16-224-in21k-v4/checkpoint-15405",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 15405,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.16,
+       "learning_rate": 1.9350860110353782e-05,
+       "loss": 0.887,
+       "step": 500
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 1.8701720220707566e-05,
+       "loss": 0.5859,
+       "step": 1000
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 1.8052580331061346e-05,
+       "loss": 0.5192,
+       "step": 1500
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 1.7403440441415126e-05,
+       "loss": 0.4758,
+       "step": 2000
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 1.6754300551768907e-05,
+       "loss": 0.4477,
+       "step": 2500
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 1.610516066212269e-05,
+       "loss": 0.4275,
+       "step": 3000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9011494252873563,
+       "eval_loss": 0.306373655796051,
+       "eval_runtime": 350.0887,
+       "eval_samples_per_second": 12.425,
+       "eval_steps_per_second": 1.554,
+       "step": 3081
+     },
+     {
+       "epoch": 1.14,
+       "learning_rate": 1.545602077247647e-05,
+       "loss": 0.3785,
+       "step": 3500
+     },
+     {
+       "epoch": 1.3,
+       "learning_rate": 1.480688088283025e-05,
+       "loss": 0.3531,
+       "step": 4000
+     },
+     {
+       "epoch": 1.46,
+       "learning_rate": 1.4157740993184033e-05,
+       "loss": 0.3636,
+       "step": 4500
+     },
+     {
+       "epoch": 1.62,
+       "learning_rate": 1.3508601103537813e-05,
+       "loss": 0.3663,
+       "step": 5000
+     },
+     {
+       "epoch": 1.79,
+       "learning_rate": 1.2859461213891595e-05,
+       "loss": 0.345,
+       "step": 5500
+     },
+     {
+       "epoch": 1.95,
+       "learning_rate": 1.2210321324245376e-05,
+       "loss": 0.3555,
+       "step": 6000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9103448275862069,
+       "eval_loss": 0.3097192347049713,
+       "eval_runtime": 317.5331,
+       "eval_samples_per_second": 13.699,
+       "eval_steps_per_second": 1.713,
+       "step": 6162
+     },
+     {
+       "epoch": 2.11,
+       "learning_rate": 1.1561181434599158e-05,
+       "loss": 0.2992,
+       "step": 6500
+     },
+     {
+       "epoch": 2.27,
+       "learning_rate": 1.0912041544952938e-05,
+       "loss": 0.3178,
+       "step": 7000
+     },
+     {
+       "epoch": 2.43,
+       "learning_rate": 1.0262901655306718e-05,
+       "loss": 0.3011,
+       "step": 7500
+     },
+     {
+       "epoch": 2.6,
+       "learning_rate": 9.6137617656605e-06,
+       "loss": 0.2823,
+       "step": 8000
+     },
+     {
+       "epoch": 2.76,
+       "learning_rate": 8.964621876014283e-06,
+       "loss": 0.2864,
+       "step": 8500
+     },
+     {
+       "epoch": 2.92,
+       "learning_rate": 8.315481986368063e-06,
+       "loss": 0.3069,
+       "step": 9000
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9105747126436782,
+       "eval_loss": 0.30360889434814453,
+       "eval_runtime": 336.8305,
+       "eval_samples_per_second": 12.915,
+       "eval_steps_per_second": 1.615,
+       "step": 9243
+     },
+     {
+       "epoch": 3.08,
+       "learning_rate": 7.666342096721843e-06,
+       "loss": 0.2615,
+       "step": 9500
+     },
+     {
+       "epoch": 3.25,
+       "learning_rate": 7.017202207075625e-06,
+       "loss": 0.2504,
+       "step": 10000
+     },
+     {
+       "epoch": 3.41,
+       "learning_rate": 6.368062317429406e-06,
+       "loss": 0.247,
+       "step": 10500
+     },
+     {
+       "epoch": 3.57,
+       "learning_rate": 5.7189224277831876e-06,
+       "loss": 0.2531,
+       "step": 11000
+     },
+     {
+       "epoch": 3.73,
+       "learning_rate": 5.06978253813697e-06,
+       "loss": 0.222,
+       "step": 11500
+     },
+     {
+       "epoch": 3.89,
+       "learning_rate": 4.42064264849075e-06,
+       "loss": 0.2449,
+       "step": 12000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9377011494252874,
+       "eval_loss": 0.22682134807109833,
+       "eval_runtime": 316.4362,
+       "eval_samples_per_second": 13.747,
+       "eval_steps_per_second": 1.719,
+       "step": 12324
+     },
+     {
+       "epoch": 4.06,
+       "learning_rate": 3.7715027588445315e-06,
+       "loss": 0.2261,
+       "step": 12500
+     },
+     {
+       "epoch": 4.22,
+       "learning_rate": 3.1223628691983127e-06,
+       "loss": 0.221,
+       "step": 13000
+     },
+     {
+       "epoch": 4.38,
+       "learning_rate": 2.473222979552094e-06,
+       "loss": 0.2247,
+       "step": 13500
+     },
+     {
+       "epoch": 4.54,
+       "learning_rate": 1.8240830899058748e-06,
+       "loss": 0.2037,
+       "step": 14000
+     },
+     {
+       "epoch": 4.71,
+       "learning_rate": 1.174943200259656e-06,
+       "loss": 0.2023,
+       "step": 14500
+     },
+     {
+       "epoch": 4.87,
+       "learning_rate": 5.258033106134373e-07,
+       "loss": 0.2339,
+       "step": 15000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9434482758620689,
+       "eval_loss": 0.22029058635234833,
+       "eval_runtime": 317.0472,
+       "eval_samples_per_second": 13.72,
+       "eval_steps_per_second": 1.716,
+       "step": 15405
+     },
+     {
+       "epoch": 5.0,
+       "step": 15405,
+       "total_flos": 3.3756908216197018e+19,
+       "train_loss": 0.33228889720388066,
+       "train_runtime": 12005.4433,
+       "train_samples_per_second": 10.264,
+       "train_steps_per_second": 1.283
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 15405,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "total_flos": 3.3756908216197018e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
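
`trainer_state.json` keeps the full `log_history`, so the per-epoch evaluation curve (accuracy 0.9011 → 0.9103 → 0.9106 → 0.9377 → 0.9434) can be recovered directly from it. A short sketch, assuming the file is read from this repository:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation rows are the log_history entries that carry "eval_accuracy";
# plain training-loss rows are skipped.
for row in state["log_history"]:
    if "eval_accuracy" in row:
        print(f"epoch {row['epoch']:.0f}: "
              f"accuracy={row['eval_accuracy']:.4f}, loss={row['eval_loss']:.4f}")
```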