howdyaendra committed on
Commit
2b3c2fb
1 Parent(s): 6bb6dbb

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,42 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - xblock-large-patch1-224/autotrain-data
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.9025482535362244
+
+ f1_macro: 0.6858654529218409
+
+ f1_micro: 0.6716867469879518
+
+ f1_weighted: 0.676828467951081
+
+ precision_macro: 0.7239086041672248
+
+ precision_micro: 0.6716867469879518
+
+ precision_weighted: 0.7046011538585282
+
+ recall_macro: 0.6707409732185557
+
+ recall_micro: 0.6716867469879518
+
+ recall_weighted: 0.6716867469879518
+
+ accuracy: 0.6716867469879518
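
For reference, a minimal inference sketch for the uploaded model. Assumptions: the repo id `howdyaendra/xblock-large-patch1-224` is taken from `training_params.json` later in this commit, and `screenshot.png` is a hypothetical placeholder input file.

```python
from transformers import pipeline

# Image-classification pipeline over the fine-tuned ViT checkpoint;
# the repo id comes from training_params.json in this commit.
classifier = pipeline(
    "image-classification",
    model="howdyaendra/xblock-large-patch1-224",
)

# Returns the labels from config.json (altright, facebook, fediverse, ...)
# ranked by score. "screenshot.png" is a placeholder, not a file in this repo.
print(classifier("screenshot.png"))
```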
checkpoint-498/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google/vit-large-patch16-224",
+   "_num_labels": 10,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "altright",
+     "1": "facebook",
+     "2": "fediverse",
+     "3": "instagram",
+     "4": "irrelevant",
+     "5": "ngl",
+     "6": "reddit",
+     "7": "threads",
+     "8": "tumblr",
+     "9": "unrecognised-screenshot"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "altright": 0,
+     "facebook": 1,
+     "fediverse": 2,
+     "instagram": 3,
+     "irrelevant": 4,
+     "ngl": 5,
+     "reddit": 6,
+     "threads": 7,
+     "tumblr": 8,
+     "unrecognised-screenshot": 9
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 24,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.3"
+ }
checkpoint-498/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b064e1af2408cf693c150c7d076751922910f72841acf808da8c6e5620941fd
+ size 1213294072
checkpoint-498/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b472eba83afb689e3d3bc5114918cb544a6c6ac34e9e4a5e55f696d3b31d5e6
+ size 2426823082
checkpoint-498/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26d5e6c5968e8878a359cd5f656bcfb66f14a3c9b60e422772c62ae09c0a4d78
+ size 14244
checkpoint-498/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:053550de8b49e6093cd0c405cf46538ddd338dbb49eaffc092b7a83af27915aa
+ size 1064
checkpoint-498/trainer_state.json ADDED
@@ -0,0 +1,509 @@
+ {
+   "best_metric": 0.9025482535362244,
+   "best_model_checkpoint": "xblock-large-patch1-224/checkpoint-498",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 498,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "grad_norm": Infinity,
+       "learning_rate": 7.000000000000001e-06,
+       "loss": 2.4849,
+       "step": 8
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 11.997095108032227,
+       "learning_rate": 1.5e-05,
+       "loss": 2.3188,
+       "step": 16
+     },
+     {
+       "epoch": 0.14,
+       "grad_norm": 12.830357551574707,
+       "learning_rate": 2.2000000000000003e-05,
+       "loss": 2.1147,
+       "step": 24
+     },
+     {
+       "epoch": 0.19,
+       "grad_norm": 11.86018180847168,
+       "learning_rate": 3e-05,
+       "loss": 1.9589,
+       "step": 32
+     },
+     {
+       "epoch": 0.24,
+       "grad_norm": 13.479438781738281,
+       "learning_rate": 3.8e-05,
+       "loss": 1.7637,
+       "step": 40
+     },
+     {
+       "epoch": 0.29,
+       "grad_norm": 13.69570255279541,
+       "learning_rate": 4.600000000000001e-05,
+       "loss": 1.8184,
+       "step": 48
+     },
+     {
+       "epoch": 0.34,
+       "grad_norm": 9.349321365356445,
+       "learning_rate": 4.955357142857143e-05,
+       "loss": 2.0031,
+       "step": 56
+     },
+     {
+       "epoch": 0.39,
+       "grad_norm": 11.169551849365234,
+       "learning_rate": 4.866071428571429e-05,
+       "loss": 1.7263,
+       "step": 64
+     },
+     {
+       "epoch": 0.43,
+       "grad_norm": 9.97819995880127,
+       "learning_rate": 4.7767857142857144e-05,
+       "loss": 1.9287,
+       "step": 72
+     },
+     {
+       "epoch": 0.48,
+       "grad_norm": 11.742727279663086,
+       "learning_rate": 4.6875e-05,
+       "loss": 1.814,
+       "step": 80
+     },
+     {
+       "epoch": 0.53,
+       "grad_norm": 8.304338455200195,
+       "learning_rate": 4.598214285714286e-05,
+       "loss": 1.4417,
+       "step": 88
+     },
+     {
+       "epoch": 0.58,
+       "grad_norm": 10.799261093139648,
+       "learning_rate": 4.5089285714285714e-05,
+       "loss": 1.7538,
+       "step": 96
+     },
+     {
+       "epoch": 0.63,
+       "grad_norm": 8.281989097595215,
+       "learning_rate": 4.419642857142857e-05,
+       "loss": 1.7161,
+       "step": 104
+     },
+     {
+       "epoch": 0.67,
+       "grad_norm": 7.088228225708008,
+       "learning_rate": 4.3303571428571435e-05,
+       "loss": 1.7526,
+       "step": 112
+     },
+     {
+       "epoch": 0.72,
+       "grad_norm": 9.898828506469727,
+       "learning_rate": 4.2410714285714285e-05,
+       "loss": 1.8377,
+       "step": 120
+     },
+     {
+       "epoch": 0.77,
+       "grad_norm": 9.417756080627441,
+       "learning_rate": 4.151785714285715e-05,
+       "loss": 1.6742,
+       "step": 128
+     },
+     {
+       "epoch": 0.82,
+       "grad_norm": 10.03836441040039,
+       "learning_rate": 4.0625000000000005e-05,
+       "loss": 1.5157,
+       "step": 136
+     },
+     {
+       "epoch": 0.87,
+       "grad_norm": 10.27881908416748,
+       "learning_rate": 3.9732142857142855e-05,
+       "loss": 1.5828,
+       "step": 144
+     },
+     {
+       "epoch": 0.92,
+       "grad_norm": 10.77905559539795,
+       "learning_rate": 3.883928571428572e-05,
+       "loss": 1.3418,
+       "step": 152
+     },
+     {
+       "epoch": 0.96,
+       "grad_norm": 12.170005798339844,
+       "learning_rate": 3.794642857142857e-05,
+       "loss": 1.8624,
+       "step": 160
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5271084337349398,
+       "eval_f1_macro": 0.4518532713560705,
+       "eval_f1_micro": 0.5271084337349398,
+       "eval_f1_weighted": 0.496654440865482,
+       "eval_loss": 1.4107117652893066,
+       "eval_precision_macro": 0.5016472507129397,
+       "eval_precision_micro": 0.5271084337349398,
+       "eval_precision_weighted": 0.5568483249244561,
+       "eval_recall_macro": 0.47663109756097566,
+       "eval_recall_micro": 0.5271084337349398,
+       "eval_recall_weighted": 0.5271084337349398,
+       "eval_runtime": 316.798,
+       "eval_samples_per_second": 1.048,
+       "eval_steps_per_second": 0.066,
+       "step": 166
+     },
+     {
+       "epoch": 1.01,
+       "grad_norm": 8.142753601074219,
+       "learning_rate": 3.716517857142857e-05,
+       "loss": 1.5166,
+       "step": 168
+     },
+     {
+       "epoch": 1.06,
+       "grad_norm": 5.322903633117676,
+       "learning_rate": 3.627232142857143e-05,
+       "loss": 1.1869,
+       "step": 176
+     },
+     {
+       "epoch": 1.11,
+       "grad_norm": 9.545618057250977,
+       "learning_rate": 3.5379464285714287e-05,
+       "loss": 1.3475,
+       "step": 184
+     },
+     {
+       "epoch": 1.16,
+       "grad_norm": 13.654799461364746,
+       "learning_rate": 3.448660714285715e-05,
+       "loss": 1.4239,
+       "step": 192
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 8.991721153259277,
+       "learning_rate": 3.359375e-05,
+       "loss": 1.3054,
+       "step": 200
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 12.191709518432617,
+       "learning_rate": 3.270089285714286e-05,
+       "loss": 1.1966,
+       "step": 208
+     },
+     {
+       "epoch": 1.3,
+       "grad_norm": 9.003186225891113,
+       "learning_rate": 3.1808035714285713e-05,
+       "loss": 1.2069,
+       "step": 216
+     },
+     {
+       "epoch": 1.35,
+       "grad_norm": 12.034103393554688,
+       "learning_rate": 3.091517857142857e-05,
+       "loss": 1.4924,
+       "step": 224
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 12.357869148254395,
+       "learning_rate": 3.013392857142857e-05,
+       "loss": 1.4235,
+       "step": 232
+     },
+     {
+       "epoch": 1.45,
+       "grad_norm": 13.986096382141113,
+       "learning_rate": 2.9241071428571432e-05,
+       "loss": 1.3803,
+       "step": 240
+     },
+     {
+       "epoch": 1.49,
+       "grad_norm": 10.213234901428223,
+       "learning_rate": 2.8348214285714285e-05,
+       "loss": 1.2811,
+       "step": 248
+     },
+     {
+       "epoch": 1.54,
+       "grad_norm": 11.94521713256836,
+       "learning_rate": 2.7455357142857145e-05,
+       "loss": 1.3474,
+       "step": 256
+     },
+     {
+       "epoch": 1.59,
+       "grad_norm": 7.413544654846191,
+       "learning_rate": 2.6562500000000002e-05,
+       "loss": 1.2188,
+       "step": 264
+     },
+     {
+       "epoch": 1.64,
+       "grad_norm": 6.420960426330566,
+       "learning_rate": 2.5669642857142855e-05,
+       "loss": 1.195,
+       "step": 272
+     },
+     {
+       "epoch": 1.69,
+       "grad_norm": 7.711160659790039,
+       "learning_rate": 2.4776785714285715e-05,
+       "loss": 1.4389,
+       "step": 280
+     },
+     {
+       "epoch": 1.73,
+       "grad_norm": 7.766310214996338,
+       "learning_rate": 2.3883928571428572e-05,
+       "loss": 1.5062,
+       "step": 288
+     },
+     {
+       "epoch": 1.78,
+       "grad_norm": 11.04636001586914,
+       "learning_rate": 2.299107142857143e-05,
+       "loss": 1.2456,
+       "step": 296
+     },
+     {
+       "epoch": 1.83,
+       "grad_norm": 7.171872138977051,
+       "learning_rate": 2.2098214285714286e-05,
+       "loss": 0.7718,
+       "step": 304
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 9.676796913146973,
+       "learning_rate": 2.1205357142857142e-05,
+       "loss": 1.0983,
+       "step": 312
+     },
+     {
+       "epoch": 1.93,
+       "grad_norm": 9.70329761505127,
+       "learning_rate": 2.0312500000000002e-05,
+       "loss": 1.0594,
+       "step": 320
+     },
+     {
+       "epoch": 1.98,
+       "grad_norm": 10.712843894958496,
+       "learning_rate": 1.941964285714286e-05,
+       "loss": 1.2865,
+       "step": 328
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.6295180722891566,
+       "eval_f1_macro": 0.6326430342148868,
+       "eval_f1_micro": 0.6295180722891566,
+       "eval_f1_weighted": 0.6385426615207972,
+       "eval_loss": 1.0817334651947021,
+       "eval_precision_macro": 0.6845586183973281,
+       "eval_precision_micro": 0.6295180722891566,
+       "eval_precision_weighted": 0.6861824846026167,
+       "eval_recall_macro": 0.6314774629363941,
+       "eval_recall_micro": 0.6295180722891566,
+       "eval_recall_weighted": 0.6295180722891566,
+       "eval_runtime": 311.4659,
+       "eval_samples_per_second": 1.066,
+       "eval_steps_per_second": 0.067,
+       "step": 332
+     },
+     {
+       "epoch": 2.02,
+       "grad_norm": 12.722149848937988,
+       "learning_rate": 1.8526785714285716e-05,
+       "loss": 1.0814,
+       "step": 336
+     },
+     {
+       "epoch": 2.07,
+       "grad_norm": 7.002964019775391,
+       "learning_rate": 1.7633928571428573e-05,
+       "loss": 0.8135,
+       "step": 344
+     },
+     {
+       "epoch": 2.12,
+       "grad_norm": 7.798354625701904,
+       "learning_rate": 1.674107142857143e-05,
+       "loss": 0.9007,
+       "step": 352
+     },
+     {
+       "epoch": 2.17,
+       "grad_norm": 5.395328998565674,
+       "learning_rate": 1.5848214285714286e-05,
+       "loss": 0.8786,
+       "step": 360
+     },
+     {
+       "epoch": 2.22,
+       "grad_norm": 11.069001197814941,
+       "learning_rate": 1.4955357142857143e-05,
+       "loss": 1.0465,
+       "step": 368
+     },
+     {
+       "epoch": 2.27,
+       "grad_norm": 11.216327667236328,
+       "learning_rate": 1.4062500000000001e-05,
+       "loss": 1.1112,
+       "step": 376
+     },
+     {
+       "epoch": 2.31,
+       "grad_norm": 10.935038566589355,
+       "learning_rate": 1.3169642857142858e-05,
+       "loss": 0.8433,
+       "step": 384
+     },
+     {
+       "epoch": 2.36,
+       "grad_norm": 18.220169067382812,
+       "learning_rate": 1.2276785714285715e-05,
+       "loss": 0.812,
+       "step": 392
+     },
+     {
+       "epoch": 2.41,
+       "grad_norm": 13.36108112335205,
+       "learning_rate": 1.1383928571428572e-05,
+       "loss": 1.0622,
+       "step": 400
+     },
+     {
+       "epoch": 2.46,
+       "grad_norm": 12.954853057861328,
+       "learning_rate": 1.049107142857143e-05,
+       "loss": 0.8341,
+       "step": 408
+     },
+     {
+       "epoch": 2.51,
+       "grad_norm": 15.882329940795898,
+       "learning_rate": 9.598214285714287e-06,
+       "loss": 0.8285,
+       "step": 416
+     },
+     {
+       "epoch": 2.55,
+       "grad_norm": 7.79279899597168,
+       "learning_rate": 8.705357142857143e-06,
+       "loss": 0.9739,
+       "step": 424
+     },
+     {
+       "epoch": 2.6,
+       "grad_norm": 11.043404579162598,
+       "learning_rate": 7.8125e-06,
+       "loss": 0.9797,
+       "step": 432
+     },
+     {
+       "epoch": 2.65,
+       "grad_norm": 7.065421104431152,
+       "learning_rate": 6.919642857142858e-06,
+       "loss": 0.8766,
+       "step": 440
+     },
+     {
+       "epoch": 2.7,
+       "grad_norm": 7.5092878341674805,
+       "learning_rate": 6.0267857142857145e-06,
+       "loss": 0.637,
+       "step": 448
+     },
+     {
+       "epoch": 2.75,
+       "grad_norm": 10.960742950439453,
+       "learning_rate": 5.133928571428571e-06,
+       "loss": 0.9343,
+       "step": 456
+     },
+     {
+       "epoch": 2.8,
+       "grad_norm": 23.01622772216797,
+       "learning_rate": 4.241071428571429e-06,
+       "loss": 1.0866,
+       "step": 464
+     },
+     {
+       "epoch": 2.84,
+       "grad_norm": 2.7104849815368652,
+       "learning_rate": 3.348214285714286e-06,
+       "loss": 0.7754,
+       "step": 472
+     },
+     {
+       "epoch": 2.89,
+       "grad_norm": 16.19709014892578,
+       "learning_rate": 2.455357142857143e-06,
+       "loss": 0.9792,
+       "step": 480
+     },
+     {
+       "epoch": 2.94,
+       "grad_norm": 9.475327491760254,
+       "learning_rate": 1.5625e-06,
+       "loss": 0.8772,
+       "step": 488
+     },
+     {
+       "epoch": 2.99,
+       "grad_norm": 7.728980541229248,
+       "learning_rate": 6.696428571428571e-07,
+       "loss": 0.7722,
+       "step": 496
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.6716867469879518,
+       "eval_f1_macro": 0.6858654529218409,
+       "eval_f1_micro": 0.6716867469879518,
+       "eval_f1_weighted": 0.676828467951081,
+       "eval_loss": 0.9025482535362244,
+       "eval_precision_macro": 0.7239086041672248,
+       "eval_precision_micro": 0.6716867469879518,
+       "eval_precision_weighted": 0.7046011538585282,
+       "eval_recall_macro": 0.6707409732185557,
+       "eval_recall_micro": 0.6716867469879518,
+       "eval_recall_weighted": 0.6716867469879518,
+       "eval_runtime": 302.7239,
+       "eval_samples_per_second": 1.097,
+       "eval_steps_per_second": 0.069,
+       "step": 498
+     }
+   ],
+   "logging_steps": 8,
+   "max_steps": 498,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 1.0897396284801761e+18,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-498/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36bcaf4b017105a6b5c611351b3463f4ec882dc8f5f08694d8c26469765c61d
+ size 4920
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google/vit-large-patch16-224",
+   "_num_labels": 10,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "altright",
+     "1": "facebook",
+     "2": "fediverse",
+     "3": "instagram",
+     "4": "irrelevant",
+     "5": "ngl",
+     "6": "reddit",
+     "7": "threads",
+     "8": "tumblr",
+     "9": "unrecognised-screenshot"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "altright": 0,
+     "facebook": 1,
+     "fediverse": 2,
+     "instagram": 3,
+     "irrelevant": 4,
+     "ngl": 5,
+     "reddit": 6,
+     "threads": 7,
+     "tumblr": 8,
+     "unrecognised-screenshot": 9
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 24,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.3"
+ }
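
The `id2label`/`label2id` maps above are what translate the classifier head's ten logits into screenshot categories. A quick hedged check, assuming the published repo id from `training_params.json` below:

```python
from transformers import ViTForImageClassification

# Load the model this config describes (ViT-large: 24 layers, hidden size 1024)
# and inspect the index-to-label mapping defined above.
model = ViTForImageClassification.from_pretrained(
    "howdyaendra/xblock-large-patch1-224"
)
print(model.config.id2label[6])  # "reddit"
print(model.config.num_labels)   # 10
```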
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b064e1af2408cf693c150c7d076751922910f72841acf808da8c6e5620941fd
+ size 1213294072
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
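
These values pin down the input pipeline: resize to 224×224 with `resample: 2` (PIL bilinear), rescale pixel values by 1/255, then normalise each channel with mean and std 0.5. A sketch of the equivalent processor built by hand (`screenshot.png` is again a placeholder file):

```python
from PIL import Image
from transformers import ViTImageProcessor

# Rebuild the processor from the values in preprocessor_config.json;
# ViTImageProcessor.from_pretrained on the repo would yield the same result.
processor = ViTImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},
    resample=2,                 # PIL bilinear
    do_rescale=True,
    rescale_factor=1 / 255,     # 0.00392156862745098
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

inputs = processor(images=Image.open("screenshot.png"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```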
runs/Apr13_16-43-06_496e75b93dc4/events.out.tfevents.1713026587.496e75b93dc4.20212.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:afe56530f22d60f93cefe7afdadb28e58362c41afc3cd9514f3884e13e812ee6
- size 18192
+ oid sha256:0709527a78dd890caa47ab0a0a76dd83a24c6931d74ce45019954d0cc716895d
+ size 20856
runs/Apr13_16-43-06_496e75b93dc4/events.out.tfevents.1713033878.496e75b93dc4.20212.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01fcb5b2c23f4051d31f4f5a1e6338a5f145e8deb65df4d22da59d7299c8f273
+ size 921
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36bcaf4b017105a6b5c611351b3463f4ec882dc8f5f08694d8c26469765c61d
+ size 4920
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "xblock-large-patch1-224/autotrain-data",
+   "model": "google/vit-large-patch16-224",
+   "username": "howdyaendra",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "xblock-large-patch1-224",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "howdyaendra/xblock-large-patch1-224",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "tensorboard"
+ }
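
These AutoTrain parameters map fairly directly onto `transformers` `TrainingArguments`. A hedged sketch of the rough equivalent follows; AutoTrain assembles its arguments internally, so this is an approximation rather than the exact invocation:

```python
from transformers import TrainingArguments

# Approximate transformers equivalent of training_params.json above.
args = TrainingArguments(
    output_dir="xblock-large-patch1-224",   # project_name
    learning_rate=5e-05,                    # lr
    num_train_epochs=3,                     # epochs
    per_device_train_batch_size=8,          # batch_size
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,          # gradient_accumulation
    optim="adamw_torch",                    # optimizer
    lr_scheduler_type="linear",             # scheduler
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                              # mixed_precision: "fp16"
    save_total_limit=1,
    save_strategy="epoch",
    evaluation_strategy="epoch",
    push_to_hub=True,
    hub_model_id="howdyaendra/xblock-large-patch1-224",  # repo_id
    report_to="tensorboard",                # log
)
```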