howdyaendra committed
Commit 65682e2
1 Parent(s): 6608173

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,42 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - xblock-social-screenshots-3/autotrain-data
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.7544093132019043
+
+ f1_macro: 0.34243943617676403
+
+ f1_micro: 0.7626506024096386
+
+ f1_weighted: 0.7295452326605777
+
+ precision_macro: 0.5376629516266399
+
+ precision_micro: 0.7626506024096386
+
+ precision_weighted: 0.7581766519951105
+
+ recall_macro: 0.3164834046818783
+
+ recall_micro: 0.7626506024096386
+
+ recall_weighted: 0.7626506024096386
+
+ accuracy: 0.7626506024096386
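
The card above reports only metrics; a minimal inference sketch may help. Assumptions: the repo id `howdyaendra/xblock-social-screenshots-3` (confirmed by training_params.json later in this commit) and a placeholder image path.

```python
# Minimal sketch: classify a screenshot with the uploaded model.
# "screenshot.png" is a placeholder path, not a file from this repo.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="howdyaendra/xblock-social-screenshots-3",
)

# Returns a list of {"label": ..., "score": ...} dicts, best first.
for prediction in classifier("screenshot.png"):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```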
checkpoint-1245/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 11,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "altright",
+     "1": "bluesky",
+     "2": "facebook",
+     "3": "fediverse",
+     "4": "instagram",
+     "5": "reddit",
+     "6": "threads",
+     "7": "tumblr",
+     "8": "twitter",
+     "9": "uncategorised",
+     "10": "unrecognised-screenshot"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "altright": 0,
+     "bluesky": 1,
+     "facebook": 2,
+     "fediverse": 3,
+     "instagram": 4,
+     "reddit": 5,
+     "threads": 6,
+     "tumblr": 7,
+     "twitter": 8,
+     "uncategorised": 9,
+     "unrecognised-screenshot": 10
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0"
+ }
checkpoint-1245/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f33136e913151d459bd50909ea58a4fef05765bb0b689634d66b0b17660e685b
+ size 343251660
checkpoint-1245/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51abf52801b3205c54a897f04839afd435947f03070a5f0e62780a33a8d185ec
+ size 686624186
checkpoint-1245/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7a935838d4458f13902cbfd7d67a47f63b37d1dd431141b42348cfd0572d11f
+ size 14180
checkpoint-1245/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8053f02486458a4c1aadfa3431194041e8e9faa02de6e3001ea4eaf37217ba69
+ size 1064
checkpoint-1245/trainer_state.json ADDED
@@ -0,0 +1,509 @@
+ {
+   "best_metric": 0.7544093132019043,
+   "best_model_checkpoint": "xblock-social-screenshots-3/checkpoint-1245",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 1245,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "grad_norm": 13.465712547302246,
+       "learning_rate": 7.2e-06,
+       "loss": 2.4572,
+       "step": 20
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 9.6524076461792,
+       "learning_rate": 1.52e-05,
+       "loss": 2.079,
+       "step": 40
+     },
+     {
+       "epoch": 0.14,
+       "grad_norm": 9.261948585510254,
+       "learning_rate": 2.32e-05,
+       "loss": 1.7089,
+       "step": 60
+     },
+     {
+       "epoch": 0.19,
+       "grad_norm": 9.193083763122559,
+       "learning_rate": 3.12e-05,
+       "loss": 1.4402,
+       "step": 80
+     },
+     {
+       "epoch": 0.24,
+       "grad_norm": 7.320693492889404,
+       "learning_rate": 3.9200000000000004e-05,
+       "loss": 1.407,
+       "step": 100
+     },
+     {
+       "epoch": 0.29,
+       "grad_norm": 7.089232444763184,
+       "learning_rate": 4.72e-05,
+       "loss": 1.2287,
+       "step": 120
+     },
+     {
+       "epoch": 0.34,
+       "grad_norm": 9.907115936279297,
+       "learning_rate": 4.941964285714286e-05,
+       "loss": 1.2205,
+       "step": 140
+     },
+     {
+       "epoch": 0.39,
+       "grad_norm": 6.660067081451416,
+       "learning_rate": 4.852678571428572e-05,
+       "loss": 1.0867,
+       "step": 160
+     },
+     {
+       "epoch": 0.43,
+       "grad_norm": 8.426629066467285,
+       "learning_rate": 4.7633928571428573e-05,
+       "loss": 1.097,
+       "step": 180
+     },
+     {
+       "epoch": 0.48,
+       "grad_norm": 9.229801177978516,
+       "learning_rate": 4.674107142857143e-05,
+       "loss": 1.4158,
+       "step": 200
+     },
+     {
+       "epoch": 0.53,
+       "grad_norm": 6.415766716003418,
+       "learning_rate": 4.584821428571429e-05,
+       "loss": 1.2353,
+       "step": 220
+     },
+     {
+       "epoch": 0.58,
+       "grad_norm": 9.118013381958008,
+       "learning_rate": 4.4955357142857144e-05,
+       "loss": 1.0926,
+       "step": 240
+     },
+     {
+       "epoch": 0.63,
+       "grad_norm": 8.883289337158203,
+       "learning_rate": 4.40625e-05,
+       "loss": 1.1642,
+       "step": 260
+     },
+     {
+       "epoch": 0.67,
+       "grad_norm": 6.868738651275635,
+       "learning_rate": 4.3169642857142864e-05,
+       "loss": 1.0041,
+       "step": 280
+     },
+     {
+       "epoch": 0.72,
+       "grad_norm": 2.424619436264038,
+       "learning_rate": 4.2276785714285714e-05,
+       "loss": 1.1074,
+       "step": 300
+     },
+     {
+       "epoch": 0.77,
+       "grad_norm": 10.195758819580078,
+       "learning_rate": 4.138392857142857e-05,
+       "loss": 0.8968,
+       "step": 320
+     },
+     {
+       "epoch": 0.82,
+       "grad_norm": 8.857401847839355,
+       "learning_rate": 4.0491071428571434e-05,
+       "loss": 1.2484,
+       "step": 340
+     },
+     {
+       "epoch": 0.87,
+       "grad_norm": 7.8076605796813965,
+       "learning_rate": 3.9598214285714284e-05,
+       "loss": 0.998,
+       "step": 360
+     },
+     {
+       "epoch": 0.92,
+       "grad_norm": 6.418033599853516,
+       "learning_rate": 3.870535714285715e-05,
+       "loss": 0.9212,
+       "step": 380
+     },
+     {
+       "epoch": 0.96,
+       "grad_norm": 7.953289985656738,
+       "learning_rate": 3.78125e-05,
+       "loss": 1.1387,
+       "step": 400
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.7156626506024096,
+       "eval_f1_macro": 0.24337876592111682,
+       "eval_f1_micro": 0.7156626506024096,
+       "eval_f1_weighted": 0.6701704805513728,
+       "eval_loss": 0.9069176912307739,
+       "eval_precision_macro": 0.28735578588970384,
+       "eval_precision_micro": 0.7156626506024096,
+       "eval_precision_weighted": 0.6398455360285946,
+       "eval_recall_macro": 0.2455658658820232,
+       "eval_recall_micro": 0.7156626506024096,
+       "eval_recall_weighted": 0.7156626506024096,
+       "eval_runtime": 32.7637,
+       "eval_samples_per_second": 25.333,
+       "eval_steps_per_second": 1.587,
+       "step": 415
+     },
+     {
+       "epoch": 1.01,
+       "grad_norm": 10.716949462890625,
+       "learning_rate": 3.691964285714286e-05,
+       "loss": 0.974,
+       "step": 420
+     },
+     {
+       "epoch": 1.06,
+       "grad_norm": 6.424170970916748,
+       "learning_rate": 3.602678571428572e-05,
+       "loss": 0.9156,
+       "step": 440
+     },
+     {
+       "epoch": 1.11,
+       "grad_norm": 5.7230000495910645,
+       "learning_rate": 3.513392857142857e-05,
+       "loss": 1.0882,
+       "step": 460
+     },
+     {
+       "epoch": 1.16,
+       "grad_norm": 6.781800746917725,
+       "learning_rate": 3.424107142857143e-05,
+       "loss": 0.8575,
+       "step": 480
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 7.278835296630859,
+       "learning_rate": 3.334821428571429e-05,
+       "loss": 0.8842,
+       "step": 500
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 8.899985313415527,
+       "learning_rate": 3.2455357142857145e-05,
+       "loss": 1.0324,
+       "step": 520
+     },
+     {
+       "epoch": 1.3,
+       "grad_norm": 10.547853469848633,
+       "learning_rate": 3.15625e-05,
+       "loss": 0.9993,
+       "step": 540
+     },
+     {
+       "epoch": 1.35,
+       "grad_norm": 5.870782852172852,
+       "learning_rate": 3.066964285714286e-05,
+       "loss": 1.0766,
+       "step": 560
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 7.413377285003662,
+       "learning_rate": 2.9776785714285715e-05,
+       "loss": 0.8224,
+       "step": 580
+     },
+     {
+       "epoch": 1.45,
+       "grad_norm": 8.232254981994629,
+       "learning_rate": 2.888392857142857e-05,
+       "loss": 0.8699,
+       "step": 600
+     },
+     {
+       "epoch": 1.49,
+       "grad_norm": 7.721341609954834,
+       "learning_rate": 2.7991071428571432e-05,
+       "loss": 1.0413,
+       "step": 620
+     },
+     {
+       "epoch": 1.54,
+       "grad_norm": 10.697331428527832,
+       "learning_rate": 2.7098214285714285e-05,
+       "loss": 0.995,
+       "step": 640
+     },
+     {
+       "epoch": 1.59,
+       "grad_norm": 5.213534355163574,
+       "learning_rate": 2.6205357142857145e-05,
+       "loss": 1.1162,
+       "step": 660
+     },
+     {
+       "epoch": 1.64,
+       "grad_norm": 4.949966907501221,
+       "learning_rate": 2.53125e-05,
+       "loss": 0.7447,
+       "step": 680
+     },
+     {
+       "epoch": 1.69,
+       "grad_norm": 8.088377952575684,
+       "learning_rate": 2.441964285714286e-05,
+       "loss": 0.7466,
+       "step": 700
+     },
+     {
+       "epoch": 1.73,
+       "grad_norm": 6.961575984954834,
+       "learning_rate": 2.3526785714285715e-05,
+       "loss": 0.8619,
+       "step": 720
+     },
+     {
+       "epoch": 1.78,
+       "grad_norm": 12.139084815979004,
+       "learning_rate": 2.2633928571428572e-05,
+       "loss": 0.6702,
+       "step": 740
+     },
+     {
+       "epoch": 1.83,
+       "grad_norm": 4.518425941467285,
+       "learning_rate": 2.174107142857143e-05,
+       "loss": 0.8649,
+       "step": 760
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 7.617171764373779,
+       "learning_rate": 2.084821428571429e-05,
+       "loss": 0.7682,
+       "step": 780
+     },
+     {
+       "epoch": 1.93,
+       "grad_norm": 9.569235801696777,
+       "learning_rate": 1.9955357142857146e-05,
+       "loss": 0.9026,
+       "step": 800
+     },
+     {
+       "epoch": 1.98,
+       "grad_norm": 8.944718360900879,
+       "learning_rate": 1.90625e-05,
+       "loss": 0.8327,
+       "step": 820
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.744578313253012,
+       "eval_f1_macro": 0.328500713177248,
+       "eval_f1_micro": 0.744578313253012,
+       "eval_f1_weighted": 0.7121093039405768,
+       "eval_loss": 0.7984326481819153,
+       "eval_precision_macro": 0.4303939410339066,
+       "eval_precision_micro": 0.744578313253012,
+       "eval_precision_weighted": 0.7177621227889628,
+       "eval_recall_macro": 0.30809268775929033,
+       "eval_recall_micro": 0.744578313253012,
+       "eval_recall_weighted": 0.744578313253012,
+       "eval_runtime": 30.47,
+       "eval_samples_per_second": 27.24,
+       "eval_steps_per_second": 1.707,
+       "step": 830
+     },
+     {
+       "epoch": 2.02,
+       "grad_norm": 6.886695861816406,
+       "learning_rate": 1.8169642857142856e-05,
+       "loss": 0.8599,
+       "step": 840
+     },
+     {
+       "epoch": 2.07,
+       "grad_norm": 3.803279399871826,
+       "learning_rate": 1.7276785714285716e-05,
+       "loss": 0.7697,
+       "step": 860
+     },
+     {
+       "epoch": 2.12,
+       "grad_norm": 8.693037033081055,
+       "learning_rate": 1.6383928571428573e-05,
+       "loss": 0.7916,
+       "step": 880
+     },
+     {
+       "epoch": 2.17,
+       "grad_norm": 10.645078659057617,
+       "learning_rate": 1.549107142857143e-05,
+       "loss": 0.6787,
+       "step": 900
+     },
+     {
+       "epoch": 2.22,
+       "grad_norm": 6.220460414886475,
+       "learning_rate": 1.4598214285714288e-05,
+       "loss": 0.7976,
+       "step": 920
+     },
+     {
+       "epoch": 2.27,
+       "grad_norm": 10.7758150100708,
+       "learning_rate": 1.3705357142857145e-05,
+       "loss": 0.6825,
+       "step": 940
+     },
+     {
+       "epoch": 2.31,
+       "grad_norm": 11.92375373840332,
+       "learning_rate": 1.28125e-05,
+       "loss": 0.853,
+       "step": 960
+     },
+     {
+       "epoch": 2.36,
+       "grad_norm": 8.55823040008545,
+       "learning_rate": 1.1919642857142858e-05,
+       "loss": 0.7647,
+       "step": 980
+     },
+     {
+       "epoch": 2.41,
+       "grad_norm": 10.385089874267578,
+       "learning_rate": 1.1026785714285715e-05,
+       "loss": 0.7351,
+       "step": 1000
+     },
+     {
+       "epoch": 2.46,
+       "grad_norm": 6.002354621887207,
+       "learning_rate": 1.0133928571428572e-05,
+       "loss": 0.8779,
+       "step": 1020
+     },
+     {
+       "epoch": 2.51,
+       "grad_norm": 7.847404956817627,
+       "learning_rate": 9.24107142857143e-06,
+       "loss": 0.7776,
+       "step": 1040
+     },
+     {
+       "epoch": 2.55,
+       "grad_norm": 5.796429634094238,
+       "learning_rate": 8.348214285714285e-06,
+       "loss": 0.743,
+       "step": 1060
+     },
+     {
+       "epoch": 2.6,
+       "grad_norm": 8.282535552978516,
+       "learning_rate": 7.455357142857143e-06,
+       "loss": 0.8694,
+       "step": 1080
+     },
+     {
+       "epoch": 2.65,
+       "grad_norm": 9.527463912963867,
+       "learning_rate": 6.5625e-06,
+       "loss": 0.7111,
+       "step": 1100
+     },
+     {
+       "epoch": 2.7,
+       "grad_norm": 7.947423458099365,
+       "learning_rate": 5.669642857142857e-06,
+       "loss": 0.9326,
+       "step": 1120
+     },
+     {
+       "epoch": 2.75,
+       "grad_norm": 8.990558624267578,
+       "learning_rate": 4.776785714285715e-06,
+       "loss": 0.7542,
+       "step": 1140
+     },
+     {
+       "epoch": 2.8,
+       "grad_norm": 8.609009742736816,
+       "learning_rate": 3.883928571428571e-06,
+       "loss": 0.7247,
+       "step": 1160
+     },
+     {
+       "epoch": 2.84,
+       "grad_norm": 3.701921224594116,
+       "learning_rate": 2.991071428571429e-06,
+       "loss": 0.6855,
+       "step": 1180
+     },
+     {
+       "epoch": 2.89,
+       "grad_norm": 6.839054584503174,
+       "learning_rate": 2.0982142857142857e-06,
+       "loss": 0.7191,
+       "step": 1200
+     },
+     {
+       "epoch": 2.94,
+       "grad_norm": 5.237525939941406,
+       "learning_rate": 1.205357142857143e-06,
+       "loss": 0.592,
+       "step": 1220
+     },
+     {
+       "epoch": 2.99,
+       "grad_norm": 6.806934356689453,
+       "learning_rate": 3.5714285714285716e-07,
+       "loss": 0.7655,
+       "step": 1240
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7626506024096386,
+       "eval_f1_macro": 0.34243943617676403,
+       "eval_f1_micro": 0.7626506024096386,
+       "eval_f1_weighted": 0.7295452326605777,
+       "eval_loss": 0.7544093132019043,
+       "eval_precision_macro": 0.5376629516266399,
+       "eval_precision_micro": 0.7626506024096386,
+       "eval_precision_weighted": 0.7581766519951105,
+       "eval_recall_macro": 0.3164834046818783,
+       "eval_recall_micro": 0.7626506024096386,
+       "eval_recall_weighted": 0.7626506024096386,
+       "eval_runtime": 30.5059,
+       "eval_samples_per_second": 27.208,
+       "eval_steps_per_second": 1.705,
+       "step": 1245
+     }
+   ],
+   "logging_steps": 20,
+   "max_steps": 1245,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 7.702550117363405e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
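
The log_history above interleaves per-step training logs (every 20 steps) with one eval block per epoch; eval_loss falls 0.907 → 0.798 → 0.754 across the three epochs, which is why checkpoint-1245 is recorded as best_model_checkpoint. A small sketch for pulling out that eval curve, assuming the checkpoint directory has been downloaded locally:

```python
# Sketch: extract per-epoch eval metrics from trainer_state.json.
import json

with open("checkpoint-1245/trainer_state.json") as f:
    state = json.load(f)

# Eval blocks carry eval_* keys; per-step training logs do not.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["epoch"], entry["eval_loss"], entry["eval_accuracy"])

# Output for the state above:
# 1.0 0.9069176912307739 0.7156626506024096
# 2.0 0.7984326481819153 0.744578313253012
# 3.0 0.7544093132019043 0.7626506024096386
```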
checkpoint-1245/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e05c6f2d7952d1f3b0c85c8ffecdcd376054441e457e1118f846ae556de4bf0
+ size 4920
config.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 11,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "altright",
+     "1": "bluesky",
+     "2": "facebook",
+     "3": "fediverse",
+     "4": "instagram",
+     "5": "reddit",
+     "6": "threads",
+     "7": "tumblr",
+     "8": "twitter",
+     "9": "uncategorised",
+     "10": "unrecognised-screenshot"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "altright": 0,
+     "bluesky": 1,
+     "facebook": 2,
+     "fediverse": 3,
+     "instagram": 4,
+     "reddit": 5,
+     "threads": 6,
+     "tumblr": 7,
+     "twitter": 8,
+     "uncategorised": 9,
+     "unrecognised-screenshot": 10
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0"
+ }
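
The id2label map above is what turns the model's 11 logits into platform names. A hedged sketch of that mapping (random noise stands in for a real preprocessed image, so the prediction itself is meaningless):

```python
# Sketch: map raw logits to the label names from id2label.
import torch
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained(
    "howdyaendra/xblock-social-screenshots-3"
)

# Random noise as a stand-in for a preprocessed 224x224 RGB image.
logits = model(pixel_values=torch.randn(1, 3, 224, 224)).logits
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])  # one of the 11 class names
```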
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f33136e913151d459bd50909ea58a4fef05765bb0b689634d66b0b17660e685b
+ size 343251660
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
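
In plain terms, this config resizes to 224×224 with bilinear resampling (resample 2 in PIL terms), rescales pixel values by 1/255, and normalizes each channel with mean and std 0.5. A sketch of applying it, assuming a placeholder image path:

```python
# Sketch: apply the ViT preprocessing described by this config.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained(
    "howdyaendra/xblock-social-screenshots-3"
)
image = Image.open("screenshot.png").convert("RGB")  # placeholder path

# Equivalent by hand: resize to 224x224 (bilinear), multiply by 1/255,
# then (x - 0.5) / 0.5 per channel.
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```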
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e05c6f2d7952d1f3b0c85c8ffecdcd376054441e457e1118f846ae556de4bf0
+ size 4920
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "xblock-social-screenshots-3/autotrain-data",
+   "model": "google/vit-base-patch16-224",
+   "username": "howdyaendra",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "xblock-social-screenshots-3",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "howdyaendra/xblock-social-screenshots-3",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
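
These parameters also explain the learning_rate column in trainer_state.json: a linear scheduler with warmup_ratio 0.1 over 1245 steps ramps toward lr 5e-05 for roughly the first 124 steps, then decays linearly to zero by step 1245. A sketch reconstructing that shape with the standard transformers helper (the exact warmup-step rounding AutoTrain uses is an assumption):

```python
# Sketch: the LR schedule implied by training_params.json.
# warmup_steps rounding is an assumption; AutoTrain may differ slightly.
import torch
from transformers import get_linear_schedule_with_warmup

max_steps = 1245                     # global_step from trainer_state.json
warmup_steps = int(0.1 * max_steps)  # warmup_ratio 0.1 -> 124 steps

optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=5e-05)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, max_steps)

for step in range(1, max_steps + 1):
    optimizer.step()
    scheduler.step()
    if step % 20 == 0 and step <= 140:
        # Compare with the first logged learning_rate values above;
        # they agree in shape, up to logging offsets.
        print(step, scheduler.get_last_lr()[0])
```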