maurya22 committed
Commit 8282257
1 parent: 3b13e36

Upload folder using huggingface_hub

checkpoint-30/config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "signature",
+    "1": "photo"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "photo": 1,
+    "signature": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.3"
+}
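
This config records a google/vit-base-patch16-224-in21k backbone fine-tuned with a two-class head (signature vs. photo). A minimal loading sketch, assuming the checkpoint directory has been downloaded locally (the path below is illustrative):

# Minimal sketch: load this checkpoint with transformers (the commit pins 4.33.3).
from transformers import ViTForImageClassification, ViTImageProcessor

model = ViTForImageClassification.from_pretrained("checkpoint-30")  # illustrative local path
processor = ViTImageProcessor.from_pretrained("checkpoint-30")
print(model.config.id2label)  # {0: 'signature', 1: 'photo'}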
checkpoint-30/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:137255b2aad9bdf5e6656cbc6e828e30edcfc8feb8a0d5600514024fdcd402bb
+size 686562821
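
The binary files in this commit (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin) are stored as Git LFS pointer stubs: the spec version, the SHA-256 of the payload, and its size in bytes. A plain clone without LFS yields only these stubs; a sketch of fetching the real payload with huggingface_hub (the repo_id is a placeholder, not given in this diff):

# Sketch: resolve an LFS pointer to its payload via the Hub API.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="maurya22/<repo-name>",          # placeholder; substitute the actual repo
    filename="checkpoint-30/optimizer.pt",
)
print(path)  # local cache path to the 686562821-byte file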
checkpoint-30/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
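
For reference, rescale_factor 0.00392156862745098 is 1/255, resample 2 is PIL's bilinear filter, and the 0.5 mean/std pair maps rescaled pixels into [-1, 1]. A hand-rolled sketch of the same pipeline, assuming Pillow and NumPy and an illustrative input file:

# Sketch of the preprocessing this config describes.
import numpy as np
from PIL import Image

img = Image.open("example.png").convert("RGB")    # illustrative filename
img = img.resize((224, 224), Image.BILINEAR)      # do_resize, resample=2 (bilinear)
x = np.asarray(img).astype(np.float32) / 255.0    # do_rescale, factor 1/255
x = (x - 0.5) / 0.5                               # do_normalize: [0, 1] -> [-1, 1]
x = x.transpose(2, 0, 1)[None]                    # HWC -> NCHW, shape (1, 3, 224, 224)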
checkpoint-30/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5658fba10a875b4ec7312f364e80d131fa728fdc79fe782c1a3dce1a1a6c3c4f
+size 343265965
checkpoint-30/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b780fbe4629615114a26f5143f8e62aa1063fcd104b572215ccd2212ec22c7e3
+size 13553
checkpoint-30/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c70a230664a9b28c19a81a4083677c13190e325d60cc4a4bd6cd8defe9f22329
+size 627
checkpoint-30/trainer_state.json ADDED
@@ -0,0 +1,289 @@
+{
+  "best_metric": 0.546416163444519,
+  "best_model_checkpoint": "photo and signature classifier/checkpoint-30",
+  "epoch": 30.0,
+  "eval_steps": 500,
+  "global_step": 30,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6157850027084351,
+      "eval_runtime": 2.094,
+      "eval_samples_per_second": 0.955,
+      "eval_steps_per_second": 0.478,
+      "step": 1
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6156324148178101,
+      "eval_runtime": 1.3188,
+      "eval_samples_per_second": 1.517,
+      "eval_steps_per_second": 0.758,
+      "step": 2
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6153320074081421,
+      "eval_runtime": 2.1616,
+      "eval_samples_per_second": 0.925,
+      "eval_steps_per_second": 0.463,
+      "step": 3
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6148884296417236,
+      "eval_runtime": 1.3743,
+      "eval_samples_per_second": 1.455,
+      "eval_steps_per_second": 0.728,
+      "step": 4
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6142956018447876,
+      "eval_runtime": 2.0558,
+      "eval_samples_per_second": 0.973,
+      "eval_steps_per_second": 0.486,
+      "step": 5
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6135208010673523,
+      "eval_runtime": 3.4628,
+      "eval_samples_per_second": 0.578,
+      "eval_steps_per_second": 0.289,
+      "step": 6
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6125726699829102,
+      "eval_runtime": 2.1386,
+      "eval_samples_per_second": 0.935,
+      "eval_steps_per_second": 0.468,
+      "step": 7
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6114814281463623,
+      "eval_runtime": 1.372,
+      "eval_samples_per_second": 1.458,
+      "eval_steps_per_second": 0.729,
+      "step": 8
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6102224588394165,
+      "eval_runtime": 2.2603,
+      "eval_samples_per_second": 0.885,
+      "eval_steps_per_second": 0.442,
+      "step": 9
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6087806224822998,
+      "eval_runtime": 2.4561,
+      "eval_samples_per_second": 0.814,
+      "eval_steps_per_second": 0.407,
+      "step": 10
+    },
+    {
+      "epoch": 11.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6071939468383789,
+      "eval_runtime": 1.3841,
+      "eval_samples_per_second": 1.445,
+      "eval_steps_per_second": 0.722,
+      "step": 11
+    },
+    {
+      "epoch": 12.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6054439544677734,
+      "eval_runtime": 2.2773,
+      "eval_samples_per_second": 0.878,
+      "eval_steps_per_second": 0.439,
+      "step": 12
+    },
+    {
+      "epoch": 13.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.603541374206543,
+      "eval_runtime": 1.3822,
+      "eval_samples_per_second": 1.447,
+      "eval_steps_per_second": 0.723,
+      "step": 13
+    },
+    {
+      "epoch": 14.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.6014397144317627,
+      "eval_runtime": 1.4331,
+      "eval_samples_per_second": 1.396,
+      "eval_steps_per_second": 0.698,
+      "step": 14
+    },
+    {
+      "epoch": 15.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5991525650024414,
+      "eval_runtime": 1.4008,
+      "eval_samples_per_second": 1.428,
+      "eval_steps_per_second": 0.714,
+      "step": 15
+    },
+    {
+      "epoch": 16.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5967353582382202,
+      "eval_runtime": 1.3847,
+      "eval_samples_per_second": 1.444,
+      "eval_steps_per_second": 0.722,
+      "step": 16
+    },
+    {
+      "epoch": 17.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5942363739013672,
+      "eval_runtime": 1.8769,
+      "eval_samples_per_second": 1.066,
+      "eval_steps_per_second": 0.533,
+      "step": 17
+    },
+    {
+      "epoch": 18.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5915404558181763,
+      "eval_runtime": 1.5814,
+      "eval_samples_per_second": 1.265,
+      "eval_steps_per_second": 0.632,
+      "step": 18
+    },
+    {
+      "epoch": 19.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5886735320091248,
+      "eval_runtime": 1.3821,
+      "eval_samples_per_second": 1.447,
+      "eval_steps_per_second": 0.724,
+      "step": 19
+    },
+    {
+      "epoch": 20.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5856430530548096,
+      "eval_runtime": 1.3848,
+      "eval_samples_per_second": 1.444,
+      "eval_steps_per_second": 0.722,
+      "step": 20
+    },
+    {
+      "epoch": 21.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5823870897293091,
+      "eval_runtime": 1.7061,
+      "eval_samples_per_second": 1.172,
+      "eval_steps_per_second": 0.586,
+      "step": 21
+    },
+    {
+      "epoch": 22.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5790473222732544,
+      "eval_runtime": 1.3936,
+      "eval_samples_per_second": 1.435,
+      "eval_steps_per_second": 0.718,
+      "step": 22
+    },
+    {
+      "epoch": 23.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5755370855331421,
+      "eval_runtime": 1.3861,
+      "eval_samples_per_second": 1.443,
+      "eval_steps_per_second": 0.721,
+      "step": 23
+    },
+    {
+      "epoch": 24.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5718429088592529,
+      "eval_runtime": 1.3725,
+      "eval_samples_per_second": 1.457,
+      "eval_steps_per_second": 0.729,
+      "step": 24
+    },
+    {
+      "epoch": 25.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5680269002914429,
+      "eval_runtime": 1.7263,
+      "eval_samples_per_second": 1.159,
+      "eval_steps_per_second": 0.579,
+      "step": 25
+    },
+    {
+      "epoch": 26.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5640724897384644,
+      "eval_runtime": 1.4089,
+      "eval_samples_per_second": 1.42,
+      "eval_steps_per_second": 0.71,
+      "step": 26
+    },
+    {
+      "epoch": 27.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5599097609519958,
+      "eval_runtime": 2.1519,
+      "eval_samples_per_second": 0.929,
+      "eval_steps_per_second": 0.465,
+      "step": 27
+    },
+    {
+      "epoch": 28.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5556421875953674,
+      "eval_runtime": 1.4101,
+      "eval_samples_per_second": 1.418,
+      "eval_steps_per_second": 0.709,
+      "step": 28
+    },
+    {
+      "epoch": 29.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.5510818958282471,
+      "eval_runtime": 2.1457,
+      "eval_samples_per_second": 0.932,
+      "eval_steps_per_second": 0.466,
+      "step": 29
+    },
+    {
+      "epoch": 30.0,
+      "eval_accuracy": 1.0,
+      "eval_loss": 0.546416163444519,
+      "eval_runtime": 1.3884,
+      "eval_samples_per_second": 1.44,
+      "eval_steps_per_second": 0.72,
+      "step": 30
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 30,
+  "num_train_epochs": 30,
+  "save_steps": 500,
+  "total_flos": 1.859807750750208e+16,
+  "trial_name": null,
+  "trial_params": null
+}
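
The trainer state logs one evaluation per epoch: eval_loss falls monotonically from 0.616 to 0.546, while eval_accuracy is already 1.0 at epoch 1 (the runtime and samples-per-second figures suggest an eval set of only about two images, so the accuracy says little on its own). A small sketch for reading the curve back out, assuming the file path:

# Sketch: extract the eval-loss curve from trainer_state.json.
import json

with open("checkpoint-30/trainer_state.json") as f:   # illustrative path
    state = json.load(f)

curve = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(curve[0], curve[-1])             # (1.0, 0.6157...), (30.0, 0.5464...)
print(state["best_model_checkpoint"])  # 'photo and signature classifier/checkpoint-30'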
checkpoint-30/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5d1fc8d0b7b960b2b59233c9b9115ca14d45d365e535d615b924b1916664b
+size 4027
config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "signature",
+    "1": "photo"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "photo": 1,
+    "signature": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.3"
+}
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "ViTImageProcessor",
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 224,
+    "width": 224
+  }
+}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5658fba10a875b4ec7312f364e80d131fa728fdc79fe782c1a3dce1a1a6c3c4f
+size 343265965
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5d1fc8d0b7b960b2b59233c9b9115ca14d45d365e535d615b924b1916664b
+size 4027