moooji committed on
Commit 4412eff
1 Parent(s): 41bc9d2

End of training

Files changed (5)
  1. README.md +7 -5
  2. all_results.json +12 -0
  3. eval_results.json +8 -0
  4. train_results.json +7 -0
  5. trainer_state.json +253 -0
README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: google/vit-large-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -14,7 +16,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: touchtech/fashion-images-perspectives
       type: imagefolder
       config: default
       split: train
@@ -22,7 +24,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.9356252884171666
+      value: 0.92455006922012
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-perspectives-vit-large-patch16-224-in21k-v3
 
-This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-perspectives dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2444
-- Accuracy: 0.9356
+- Loss: 0.2419
+- Accuracy: 0.9246
 
 ## Model description
 
 
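The updated card describes a ViT image classifier fine-tuned on the touchtech/fashion-images-perspectives dataset. As a minimal usage sketch (not part of this commit; the repo id `moooji/fashion-images-perspectives-vit-large-patch16-224-in21k-v3` and the input file name are assumptions), the checkpoint could be loaded with the Transformers image-classification pipeline:

```python
from transformers import pipeline

# Assumed repo id -- inferred from the committer and model name, not stated in this commit.
classifier = pipeline(
    "image-classification",
    model="moooji/fashion-images-perspectives-vit-large-patch16-224-in21k-v3",
)

# Hypothetical local image; any PIL-readable file or image URL works with the pipeline.
for prediction in classifier("product_photo.jpg"):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```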
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.92455006922012,
+    "eval_loss": 0.24191519618034363,
+    "eval_runtime": 317.7384,
+    "eval_samples_per_second": 13.64,
+    "eval_steps_per_second": 1.706,
+    "train_loss": 0.3319820684874097,
+    "train_runtime": 13677.2564,
+    "train_samples_per_second": 8.977,
+    "train_steps_per_second": 1.122
+}
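For context, the throughput figures above are internally consistent: dividing samples per second by steps per second recovers an effective batch size of about 8 for both training and evaluation (the batch size itself is an inference from these ratios, not a value recorded in this commit).

```python
# Consistency check on the metrics in all_results.json.
# The batch size of ~8 is inferred from these ratios, not stored anywhere in the commit.
train_batch = 8.977 / 1.122   # train_samples_per_second / train_steps_per_second ≈ 8.0
eval_batch = 13.64 / 1.706    # eval_samples_per_second / eval_steps_per_second ≈ 8.0
print(f"effective train batch ≈ {train_batch:.1f}, eval batch ≈ {eval_batch:.1f}")
```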
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.92455006922012,
+    "eval_loss": 0.24191519618034363,
+    "eval_runtime": 317.7384,
+    "eval_samples_per_second": 13.64,
+    "eval_steps_per_second": 1.706
+}
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.3319820684874097,
+    "train_runtime": 13677.2564,
+    "train_samples_per_second": 8.977,
+    "train_steps_per_second": 1.122
+}
trainer_state.json ADDED
@@ -0,0 +1,253 @@
+{
+  "best_metric": 0.24191519618034363,
+  "best_model_checkpoint": "/workspace/training_output/perspectives-vit-large-patch16-224-in21k-v3/checkpoint-9210",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 15350,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.16,
+      "learning_rate": 1.93485342019544e-05,
+      "loss": 0.9017,
+      "step": 500
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 1.8697068403908796e-05,
+      "loss": 0.5698,
+      "step": 1000
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 1.8045602605863193e-05,
+      "loss": 0.4974,
+      "step": 1500
+    },
+    {
+      "epoch": 0.65,
+      "learning_rate": 1.7394136807817594e-05,
+      "loss": 0.4646,
+      "step": 2000
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 1.6742671009771988e-05,
+      "loss": 0.4538,
+      "step": 2500
+    },
+    {
+      "epoch": 0.98,
+      "learning_rate": 1.6091205211726385e-05,
+      "loss": 0.4353,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.8811721273650208,
+      "eval_loss": 0.33953866362571716,
+      "eval_runtime": 349.0715,
+      "eval_samples_per_second": 12.416,
+      "eval_steps_per_second": 1.553,
+      "step": 3070
+    },
+    {
+      "epoch": 1.14,
+      "learning_rate": 1.5439739413680782e-05,
+      "loss": 0.3613,
+      "step": 3500
+    },
+    {
+      "epoch": 1.3,
+      "learning_rate": 1.478827361563518e-05,
+      "loss": 0.3571,
+      "step": 4000
+    },
+    {
+      "epoch": 1.47,
+      "learning_rate": 1.4136807817589578e-05,
+      "loss": 0.3861,
+      "step": 4500
+    },
+    {
+      "epoch": 1.63,
+      "learning_rate": 1.3485342019543975e-05,
+      "loss": 0.3657,
+      "step": 5000
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 1.2833876221498372e-05,
+      "loss": 0.3615,
+      "step": 5500
+    },
+    {
+      "epoch": 1.95,
+      "learning_rate": 1.2182410423452771e-05,
+      "loss": 0.3415,
+      "step": 6000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9192431933548685,
+      "eval_loss": 0.25435712933540344,
+      "eval_runtime": 357.646,
+      "eval_samples_per_second": 12.118,
+      "eval_steps_per_second": 1.515,
+      "step": 6140
+    },
+    {
+      "epoch": 2.12,
+      "learning_rate": 1.1530944625407168e-05,
+      "loss": 0.3178,
+      "step": 6500
+    },
+    {
+      "epoch": 2.28,
+      "learning_rate": 1.0879478827361565e-05,
+      "loss": 0.2954,
+      "step": 7000
+    },
+    {
+      "epoch": 2.44,
+      "learning_rate": 1.0228013029315963e-05,
+      "loss": 0.3127,
+      "step": 7500
+    },
+    {
+      "epoch": 2.61,
+      "learning_rate": 9.576547231270358e-06,
+      "loss": 0.3116,
+      "step": 8000
+    },
+    {
+      "epoch": 2.77,
+      "learning_rate": 8.925081433224755e-06,
+      "loss": 0.2747,
+      "step": 8500
+    },
+    {
+      "epoch": 2.93,
+      "learning_rate": 8.273615635179154e-06,
+      "loss": 0.2689,
+      "step": 9000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.92455006922012,
+      "eval_loss": 0.24191519618034363,
+      "eval_runtime": 319.492,
+      "eval_samples_per_second": 13.565,
+      "eval_steps_per_second": 1.696,
+      "step": 9210
+    },
+    {
+      "epoch": 3.09,
+      "learning_rate": 7.622149837133551e-06,
+      "loss": 0.2727,
+      "step": 9500
+    },
+    {
+      "epoch": 3.26,
+      "learning_rate": 6.9706840390879485e-06,
+      "loss": 0.2362,
+      "step": 10000
+    },
+    {
+      "epoch": 3.42,
+      "learning_rate": 6.319218241042345e-06,
+      "loss": 0.2617,
+      "step": 10500
+    },
+    {
+      "epoch": 3.58,
+      "learning_rate": 5.667752442996744e-06,
+      "loss": 0.2437,
+      "step": 11000
+    },
+    {
+      "epoch": 3.75,
+      "learning_rate": 5.016286644951141e-06,
+      "loss": 0.2494,
+      "step": 11500
+    },
+    {
+      "epoch": 3.91,
+      "learning_rate": 4.364820846905538e-06,
+      "loss": 0.2525,
+      "step": 12000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9192431933548685,
+      "eval_loss": 0.29525890946388245,
+      "eval_runtime": 322.9797,
+      "eval_samples_per_second": 13.419,
+      "eval_steps_per_second": 1.678,
+      "step": 12280
+    },
+    {
+      "epoch": 4.07,
+      "learning_rate": 3.7133550488599353e-06,
+      "loss": 0.2265,
+      "step": 12500
+    },
+    {
+      "epoch": 4.23,
+      "learning_rate": 3.061889250814333e-06,
+      "loss": 0.204,
+      "step": 13000
+    },
+    {
+      "epoch": 4.4,
+      "learning_rate": 2.4104234527687296e-06,
+      "loss": 0.2261,
+      "step": 13500
+    },
+    {
+      "epoch": 4.56,
+      "learning_rate": 1.7589576547231272e-06,
+      "loss": 0.2152,
+      "step": 14000
+    },
+    {
+      "epoch": 4.72,
+      "learning_rate": 1.1074918566775244e-06,
+      "loss": 0.1766,
+      "step": 14500
+    },
+    {
+      "epoch": 4.89,
+      "learning_rate": 4.5602605863192187e-07,
+      "loss": 0.1977,
+      "step": 15000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9356252884171666,
+      "eval_loss": 0.24443966150283813,
+      "eval_runtime": 321.057,
+      "eval_samples_per_second": 13.499,
+      "eval_steps_per_second": 1.688,
+      "step": 15350
+    },
+    {
+      "epoch": 5.0,
+      "step": 15350,
+      "total_flos": 3.363363283622308e+19,
+      "train_loss": 0.3319820684874097,
+      "train_runtime": 13677.2564,
+      "train_samples_per_second": 8.977,
+      "train_steps_per_second": 1.122
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 15350,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 3.363363283622308e+19,
+  "trial_name": null,
+  "trial_params": null
+}
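The trainer_state.json added above keeps every logged training step and the per-epoch evaluations in `log_history`. A minimal sketch for reading it back, assuming a local copy of the file:

```python
import json

# Load the state file written by the Hugging Face Trainer (local path assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss:", state["best_metric"])

# Evaluation entries carry eval_* keys; plain training-step entries carry "loss".
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']}: "
              f"eval_loss={entry['eval_loss']:.4f}, "
              f"eval_accuracy={entry['eval_accuracy']:.4f}")
```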