nurhikam committed on
Commit b1e25a5
Parent: dc2557e

commit files to HF hub

README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ tags:
+ - image-classification
+ - pytorch
+ - huggingpics
+ metrics:
+ - accuracy
+
+ model-index:
+ - name: BlurOrBokeh
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9825174808502197
+ ---
+
+ # BlurOrBokeh
+
+
+ Blur Or Bokeh Model
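To try the classifier, the standard `transformers` pipeline is enough. A minimal inference sketch, assuming the repo id is `nurhikam/BlurOrBokeh` (inferred from the committer and model name; the commit itself does not state it):

```python
from transformers import pipeline

# Load the fine-tuned classifier from the Hub.
# NOTE: the repo id below is an assumption, not taken from this commit.
classifier = pipeline("image-classification", model="nurhikam/BlurOrBokeh")

# Score a local photo against the three labels: Blur, Bokeh, Normal.
predictions = classifier("photo.jpg")
print(predictions)  # e.g. [{"label": "Bokeh", "score": 0.97}, ...]
```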
config.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "_name_or_path": "microsoft/swinv2-base-patch4-window8-256",
+   "architectures": [
+     "Swinv2ForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     18,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 128,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Blur",
+     "1": "Bokeh",
+     "2": "Normal"
+   },
+   "image_size": 256,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Blur": "0",
+     "Bokeh": "1",
+     "Normal": "2"
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swinv2",
+   "num_channels": 3,
+   "num_heads": [
+     4,
+     8,
+     16,
+     32
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "path_norm": true,
+   "pretrained_window_sizes": [
+     0,
+     0,
+     0,
+     0
+   ],
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_absolute_embeddings": false,
+   "window_size": 8
+ }
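The config pins the backbone (`microsoft/swinv2-base-patch4-window8-256`) and the three-way label mapping. A sketch of inspecting it programmatically, under the same assumed repo id:

```python
from transformers import AutoConfig, AutoModelForImageClassification

# Repo id is assumed, as above.
config = AutoConfig.from_pretrained("nurhikam/BlurOrBokeh")
print(config.model_type)  # "swinv2"
print(config.id2label)    # {0: "Blur", 1: "Bokeh", 2: "Normal"}

# from_pretrained loads the fine-tuned weights committed here;
# AutoModelForImageClassification.from_config(config) would instead
# build the same architecture with randomly initialized weights.
model = AutoModelForImageClassification.from_pretrained("nurhikam/BlurOrBokeh")
```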
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dde120b3859cdefb4695ceaf8c513ed479ed67f56944ce5e301e12cb416b4bf2
+ size 347649588
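The three lines above are a Git LFS pointer (spec version, SHA-256 of the blob, byte size), not the ~348 MB weights file itself. A sketch of fetching the real file, again under the assumed repo id:

```python
from huggingface_hub import hf_hub_download

# Downloads and caches the actual safetensors blob the pointer refers to.
path = hf_hub_download(
    repo_id="nurhikam/BlurOrBokeh",  # assumed repo id, as above
    filename="model.safetensors",
)
print(path)  # local cache path of the downloaded weights
```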
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 256,
+     "width": 256
+   }
+ }
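Per this config, inference-time preprocessing resizes to 256x256 with bicubic resampling (`resample: 3`), rescales pixel values by 1/255 (`0.00392156862745098`), and normalizes with the ImageNet mean/std listed above. A sketch of applying it, under the same assumed repo id:

```python
from PIL import Image
from transformers import AutoImageProcessor

# Repo id is assumed, as above.
processor = AutoImageProcessor.from_pretrained("nurhikam/BlurOrBokeh")

image = Image.open("photo.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 256, 256])
```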
runs/events.out.tfevents.1716471883.9e16d6303809.365.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f3a910f40c8d9f1ddf921f31573566200f992fc6182c66e61b1cfc29c4b3ea0
+ size 5368