Zmu committed on
Commit
b021bb0
1 Parent(s): 6bd556b

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,32 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - Zmu/autotrain-data-autotrain-81svx-t510h
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.028854751959443092
+
+ f1: 0.9889064976228209
+
+ precision: 0.9936305732484076
+
+ recall: 0.9842271293375394
+
+ auc: 0.9995604281946529
+
+ accuracy: 0.9897510980966325
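
For reference, a minimal inference sketch against the uploaded model (assuming the `transformers` library and the `Zmu/autotrain-81svx-t510h` repo id recorded in `training_params.json` below; the image URL is one of the widget examples above):

```python
# Minimal sketch: run the uploaded classifier on one of the widget images.
# Assumes `transformers`, `torch`, and `Pillow` are installed.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Zmu/autotrain-81svx-t510h",  # repo_id from training_params.json
)

# One of the widget examples from the model card above.
url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
print(classifier(url))  # e.g. [{'label': 'normal', 'score': ...}, ...]
```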
checkpoint-1026/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "Falconsai/nsfw_image_detection",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "normal",
+     "1": "nsfw"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "normal": 0,
+     "nsfw": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
checkpoint-1026/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f980071de09d8ac3c73d02e88884cce7ba54433533689fb4f9d7514f7e668543
+ size 343223968
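
This file (like `optimizer.pt`, `rng_state.pth`, and the other binaries below) is a Git LFS pointer: the actual blob lives out-of-band and is identified by a SHA-256 object id and a byte size. A minimal sketch of verifying a downloaded blob against its pointer, using only the standard library (the local paths here are hypothetical):

```python
# Minimal sketch: check a downloaded blob against a Git LFS pointer file.
# Pointer format is three lines: "version <spec-url>", "oid sha256:<hex>", "size <bytes>".
import hashlib
import os

def verify_lfs_blob(pointer_path: str, blob_path: str) -> bool:
    # Parse the three "key value" lines of the pointer.
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Hash the blob in chunks so large weight files don't need to fit in memory.
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Hypothetical paths: the pointer as stored in git, and the resolved blob.
print(verify_lfs_blob("model.safetensors.pointer", "model.safetensors"))
```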
checkpoint-1026/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bd2f6dbb557af34d20e43866be697c333cf3accce6ce1401662d04046b3821b
+ size 686568890
checkpoint-1026/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c9b87a99c9e4df9470c7ceff7a5170aa5410a5d8a29b85689b6fc290f786393
+ size 14244
checkpoint-1026/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99e441a3979e3b334fe258c10f6214158f2a53728e3216c2fc65be45bf763f12
+ size 1064
checkpoint-1026/trainer_state.json ADDED
@@ -0,0 +1,420 @@
+ {
+   "best_metric": 0.028854751959443092,
+   "best_model_checkpoint": "/tmp/model/checkpoint-1026",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 1026,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "learning_rate": 7.281553398058253e-06,
+       "loss": 2.6614,
+       "step": 17
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.5048543689320387e-05,
+       "loss": 1.2662,
+       "step": 34
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 2.330097087378641e-05,
+       "loss": 0.5076,
+       "step": 51
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 3.155339805825243e-05,
+       "loss": 0.4482,
+       "step": 68
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 3.980582524271845e-05,
+       "loss": 0.3258,
+       "step": 85
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 4.805825242718447e-05,
+       "loss": 0.2473,
+       "step": 102
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 4.929577464788733e-05,
+       "loss": 0.3911,
+       "step": 119
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 4.837486457204768e-05,
+       "loss": 0.4416,
+       "step": 136
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 4.745395449620802e-05,
+       "loss": 0.4884,
+       "step": 153
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 4.653304442036837e-05,
+       "loss": 0.2937,
+       "step": 170
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 4.561213434452871e-05,
+       "loss": 0.3657,
+       "step": 187
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 4.469122426868906e-05,
+       "loss": 0.1776,
+       "step": 204
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 4.377031419284941e-05,
+       "loss": 0.3519,
+       "step": 221
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 4.284940411700975e-05,
+       "loss": 0.3079,
+       "step": 238
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 4.1928494041170105e-05,
+       "loss": 0.2125,
+       "step": 255
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 4.1007583965330447e-05,
+       "loss": 0.1151,
+       "step": 272
+     },
+     {
+       "epoch": 0.85,
+       "learning_rate": 4.0086673889490795e-05,
+       "loss": 0.3185,
+       "step": 289
+     },
+     {
+       "epoch": 0.89,
+       "learning_rate": 3.916576381365114e-05,
+       "loss": 0.1969,
+       "step": 306
+     },
+     {
+       "epoch": 0.94,
+       "learning_rate": 3.8244853737811486e-05,
+       "loss": 0.2635,
+       "step": 323
+     },
+     {
+       "epoch": 0.99,
+       "learning_rate": 3.7323943661971835e-05,
+       "loss": 0.2407,
+       "step": 340
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9633967789165446,
+       "eval_auc": 0.9964920446122287,
+       "eval_f1": 0.9614791987673343,
+       "eval_loss": 0.10120841860771179,
+       "eval_precision": 0.9397590361445783,
+       "eval_recall": 0.9842271293375394,
+       "eval_runtime": 12.6432,
+       "eval_samples_per_second": 54.021,
+       "eval_steps_per_second": 3.401,
+       "step": 342
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 3.640303358613218e-05,
+       "loss": 0.18,
+       "step": 357
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 3.5482123510292526e-05,
+       "loss": 0.167,
+       "step": 374
+     },
+     {
+       "epoch": 1.14,
+       "learning_rate": 3.4561213434452874e-05,
+       "loss": 0.2256,
+       "step": 391
+     },
+     {
+       "epoch": 1.19,
+       "learning_rate": 3.3640303358613216e-05,
+       "loss": 0.144,
+       "step": 408
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 3.2719393282773565e-05,
+       "loss": 0.108,
+       "step": 425
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 3.179848320693391e-05,
+       "loss": 0.2115,
+       "step": 442
+     },
+     {
+       "epoch": 1.34,
+       "learning_rate": 3.087757313109426e-05,
+       "loss": 0.1777,
+       "step": 459
+     },
+     {
+       "epoch": 1.39,
+       "learning_rate": 2.9956663055254608e-05,
+       "loss": 0.2568,
+       "step": 476
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 2.9035752979414954e-05,
+       "loss": 0.1353,
+       "step": 493
+     },
+     {
+       "epoch": 1.49,
+       "learning_rate": 2.81148429035753e-05,
+       "loss": 0.1635,
+       "step": 510
+     },
+     {
+       "epoch": 1.54,
+       "learning_rate": 2.7193932827735648e-05,
+       "loss": 0.244,
+       "step": 527
+     },
+     {
+       "epoch": 1.59,
+       "learning_rate": 2.6273022751895993e-05,
+       "loss": 0.1751,
+       "step": 544
+     },
+     {
+       "epoch": 1.64,
+       "learning_rate": 2.535211267605634e-05,
+       "loss": 0.1629,
+       "step": 561
+     },
+     {
+       "epoch": 1.69,
+       "learning_rate": 2.4431202600216684e-05,
+       "loss": 0.2097,
+       "step": 578
+     },
+     {
+       "epoch": 1.74,
+       "learning_rate": 2.3510292524377033e-05,
+       "loss": 0.1262,
+       "step": 595
+     },
+     {
+       "epoch": 1.79,
+       "learning_rate": 2.258938244853738e-05,
+       "loss": 0.0854,
+       "step": 612
+     },
+     {
+       "epoch": 1.84,
+       "learning_rate": 2.1668472372697727e-05,
+       "loss": 0.3253,
+       "step": 629
+     },
+     {
+       "epoch": 1.89,
+       "learning_rate": 2.0747562296858072e-05,
+       "loss": 0.0901,
+       "step": 646
+     },
+     {
+       "epoch": 1.94,
+       "learning_rate": 1.9826652221018418e-05,
+       "loss": 0.0816,
+       "step": 663
+     },
+     {
+       "epoch": 1.99,
+       "learning_rate": 1.8905742145178766e-05,
+       "loss": 0.1621,
+       "step": 680
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9853587115666179,
+       "eval_auc": 0.9993104755994553,
+       "eval_f1": 0.9843260188087775,
+       "eval_loss": 0.039207685738801956,
+       "eval_precision": 0.9781931464174455,
+       "eval_recall": 0.9905362776025236,
+       "eval_runtime": 12.5653,
+       "eval_samples_per_second": 54.356,
+       "eval_steps_per_second": 3.422,
+       "step": 684
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 1.7984832069339115e-05,
+       "loss": 0.154,
+       "step": 697
+     },
+     {
+       "epoch": 2.09,
+       "learning_rate": 1.706392199349946e-05,
+       "loss": 0.1426,
+       "step": 714
+     },
+     {
+       "epoch": 2.14,
+       "learning_rate": 1.6143011917659806e-05,
+       "loss": 0.1294,
+       "step": 731
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 1.5222101841820153e-05,
+       "loss": 0.0665,
+       "step": 748
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 1.4301191765980498e-05,
+       "loss": 0.0233,
+       "step": 765
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 1.3380281690140845e-05,
+       "loss": 0.0811,
+       "step": 782
+     },
+     {
+       "epoch": 2.34,
+       "learning_rate": 1.2459371614301192e-05,
+       "loss": 0.0786,
+       "step": 799
+     },
+     {
+       "epoch": 2.39,
+       "learning_rate": 1.153846153846154e-05,
+       "loss": 0.1104,
+       "step": 816
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 1.0617551462621887e-05,
+       "loss": 0.1425,
+       "step": 833
+     },
+     {
+       "epoch": 2.49,
+       "learning_rate": 9.696641386782232e-06,
+       "loss": 0.0252,
+       "step": 850
+     },
+     {
+       "epoch": 2.54,
+       "learning_rate": 8.775731310942579e-06,
+       "loss": 0.1767,
+       "step": 867
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 7.90899241603467e-06,
+       "loss": 0.0504,
+       "step": 884
+     },
+     {
+       "epoch": 2.63,
+       "learning_rate": 6.988082340195017e-06,
+       "loss": 0.1102,
+       "step": 901
+     },
+     {
+       "epoch": 2.68,
+       "learning_rate": 6.067172264355364e-06,
+       "loss": 0.11,
+       "step": 918
+     },
+     {
+       "epoch": 2.73,
+       "learning_rate": 5.14626218851571e-06,
+       "loss": 0.0775,
+       "step": 935
+     },
+     {
+       "epoch": 2.78,
+       "learning_rate": 4.225352112676056e-06,
+       "loss": 0.132,
+       "step": 952
+     },
+     {
+       "epoch": 2.83,
+       "learning_rate": 3.304442036836403e-06,
+       "loss": 0.1459,
+       "step": 969
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 2.38353196099675e-06,
+       "loss": 0.0426,
+       "step": 986
+     },
+     {
+       "epoch": 2.93,
+       "learning_rate": 1.4626218851570965e-06,
+       "loss": 0.2774,
+       "step": 1003
+     },
+     {
+       "epoch": 2.98,
+       "learning_rate": 5.417118093174432e-07,
+       "loss": 0.0373,
+       "step": 1020
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9897510980966325,
+       "eval_auc": 0.9995604281946529,
+       "eval_f1": 0.9889064976228209,
+       "eval_loss": 0.028854751959443092,
+       "eval_precision": 0.9936305732484076,
+       "eval_recall": 0.9842271293375394,
+       "eval_runtime": 12.521,
+       "eval_samples_per_second": 54.548,
+       "eval_steps_per_second": 3.434,
+       "step": 1026
+     }
+   ],
+   "logging_steps": 17,
+   "max_steps": 1026,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 6.35124346881196e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
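
The `log_history` above interleaves per-step training logs with one evaluation record per epoch. A minimal sketch for pulling out just the per-epoch eval metrics (assuming a local copy of the file at the path shown):

```python
# Minimal sketch: extract the per-epoch evaluation rows from trainer_state.json.
# Eval records are distinguished from training logs by their "eval_loss" key.
import json

with open("checkpoint-1026/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"epoch {entry['epoch']:.1f}: "
            f"loss={entry['eval_loss']:.4f} "
            f"f1={entry['eval_f1']:.4f} "
            f"acc={entry['eval_accuracy']:.4f}"
        )
# With the values above this prints three lines, one per epoch,
# ending at the best checkpoint (eval_loss ~0.0289 at step 1026).
```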
checkpoint-1026/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d6807759d4ca960e03f26c891d53499680604b0eda1c5a87063a06cb2f42a11
+ size 4728
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "Falconsai/nsfw_image_detection",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "normal",
+     "1": "nsfw"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "normal": 0,
+     "nsfw": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f980071de09d8ac3c73d02e88884cce7ba54433533689fb4f9d7514f7e668543
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
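
Together, `preprocessor_config.json` and `config.json` define the full input path: resize to 224x224, rescale by 1/255 (0.00392...), normalize each channel with mean and std 0.5, then feed a 16x16-patch ViT whose two logits map through `id2label` to `normal`/`nsfw`. A minimal sketch of that path without the `pipeline` wrapper (assuming `torch` and `Pillow`; `photo.jpg` is a hypothetical local file):

```python
# Minimal sketch: explicit preprocessing + forward pass instead of pipeline().
# The processor applies the resize/rescale/normalize settings shown above.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

repo = "Zmu/autotrain-81svx-t510h"
processor = ViTImageProcessor.from_pretrained(repo)
model = ViTForImageClassification.from_pretrained(repo)

image = Image.open("photo.jpg").convert("RGB")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")  # pixel_values: (1, 3, 224, 224)

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # "normal" or "nsfw"
```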
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d6807759d4ca960e03f26c891d53499680604b0eda1c5a87063a06cb2f42a11
+ size 4728
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "Zmu/autotrain-data-autotrain-81svx-t510h",
+   "model": "Falconsai/nsfw_image_detection",
+   "username": "Zmu",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "/tmp/model",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp16",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "Zmu/autotrain-81svx-t510h",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
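
For readers reproducing the run outside AutoTrain, these parameters map roughly onto `transformers.TrainingArguments`. A hedged sketch of that mapping (this mirrors the recorded hyperparameters only; AutoTrain's internal wiring may differ, and the output directory is taken from "project_name"):

```python
# Minimal sketch: training_params.json expressed as TrainingArguments.
# Not AutoTrain's actual code; just the recorded hyperparameters.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="/tmp/model",                   # "project_name"
    learning_rate=5e-5,                        # "lr"
    num_train_epochs=3,                        # "epochs"
    per_device_train_batch_size=8,             # "batch_size"
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,             # "gradient_accumulation"
    optim="adamw_torch",                       # "optimizer"
    lr_scheduler_type="linear",                # "scheduler"
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    fp16=True,                                 # "mixed_precision": "fp16" (needs a GPU)
    save_total_limit=1,
    save_strategy="epoch",
    evaluation_strategy="epoch",
    push_to_hub=True,
    hub_model_id="Zmu/autotrain-81svx-t510h",  # "repo_id"
)
```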