enessehirli committed on
Commit d3bcd1d
1 Parent(s): 428881c

Upload 15 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/4969.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,64 @@
+ import gradio as gr
+ import os
+ import torch
+
+ from model import create_vit_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ # The 11 weather classes the model was trained to recognize
+ class_names = ['dew',
+                'fogsmog',
+                'frost',
+                'glaze',
+                'hail',
+                'lightning',
+                'rain',
+                'rainbow',
+                'rime',
+                'sandstorm',
+                'snow']
+
+ # Create the ViT-B/16 feature extractor and its matching image transforms
+ vitb16, vitb16_transforms = create_vit_model(num_classes=len(class_names))
+
+ # Load the fine-tuned classifier head weights (CPU-only for the Space)
+ vitb16.load_state_dict(
+     torch.load("vitb16_feature_extractor_weather_rcg.pth",
+                map_location=torch.device("cpu"))
+ )
+
+ def predict(img) -> Tuple[Dict, float]:
+     """Transforms the input image, runs inference and returns class probabilities plus inference time."""
+     start_timer = timer()
+
+     # Apply the ViT transforms and add a batch dimension
+     img = vitb16_transforms(img).unsqueeze(0)
+
+     vitb16.eval()
+     with torch.inference_mode():
+         pred_probs = torch.softmax(vitb16(img), dim=1)
+
+     # Map every class name to its predicted probability
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     pred_timer = round(timer() - start_timer, 4)
+
+     return pred_labels_and_probs, pred_timer
+
+
+ title = "Weather Recognition"
+
+ description = "A ViT-B/16 feature extractor computer vision model to recognize weather conditions"
+
+ # Build the examples list from the bundled images in examples/
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ demo = gr.Interface(
+     fn=predict,
+     # Gradio 4.x: gr.inputs was removed, use gr.Image directly
+     inputs=gr.Image(type="pil"),
+     outputs=[
+         gr.Label(num_top_classes=11, label="Predictions"),
+         gr.Number(label="Prediction time (s)")],
+     examples=example_list,
+     title=title,
+     description=description
+ )
+
+ demo.launch()
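A minimal sketch (not part of this commit) of how the same inference path could be exercised locally without launching the Gradio UI. It rebuilds the model via model.py and assumes the uploaded weights file and the examples/ directory are present in the working directory; the script name and printed output are illustrative only.

# check_inference.py (hypothetical): run one example image through the model
from PIL import Image
import torch
from model import create_vit_model

# Recreate the model the same way app.py does and load the uploaded weights
model, transforms = create_vit_model(num_classes=11)
model.load_state_dict(
    torch.load("vitb16_feature_extractor_weather_rcg.pth", map_location=torch.device("cpu"))
)
model.eval()

# Apply the matching transforms and take a softmax over the 11 class logits
img = Image.open("examples/0384.jpg")
with torch.inference_mode():
    probs = torch.softmax(model(transforms(img).unsqueeze(0)), dim=1)

print("Predicted class index:", probs.argmax(dim=1).item(),
      "probability:", round(probs.max().item(), 3))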
examples/0384.jpg ADDED
examples/0821.jpg ADDED
examples/1303.jpg ADDED
examples/2115.jpg ADDED
examples/2484.jpg ADDED
examples/3541.jpg ADDED
examples/3927.jpg ADDED
examples/4793.jpg ADDED
examples/4969.jpg ADDED

Git LFS Details

  • SHA256: bb74f4b5a34a5fc45daa99bea03b68e4593988cda72ad8683e84a5d8c073527f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.07 MB
examples/5030.jpg ADDED
examples/6498.jpg ADDED
model.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+ import torchvision
+
+ from torch import nn
+
+ def create_vit_model(num_classes: int = 11, seed: int = 42):
+     # Pretrained ViT-B/16 weights and the transforms that match them
+     vitb16_weights = torchvision.models.ViT_B_16_Weights.DEFAULT
+
+     vitb16_transforms = vitb16_weights.transforms()
+
+     model = torchvision.models.vit_b_16(weights=vitb16_weights)
+
+     # Freeze the backbone so only the new classifier head is trainable
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # Replace the classification head (ViT-B/16 hidden size is 768)
+     torch.manual_seed(seed)
+     model.heads = nn.Sequential(
+         nn.Linear(in_features=768, out_features=num_classes)
+     )
+
+     return model, vitb16_transforms
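A small hypothetical smoke test (not part of this commit) for create_vit_model: it feeds a random batch through the model to confirm the replaced head emits one logit per class and that only the new head remains trainable. The batch size and input resolution are assumptions based on ViT-B/16's default 224x224 input.

# smoke_test.py (hypothetical): shape and freeze check for create_vit_model
import torch
from model import create_vit_model

model, transforms = create_vit_model(num_classes=11)

# ViT-B/16 expects 3x224x224 inputs; use a random batch of 2
dummy = torch.randn(2, 3, 224, 224)
with torch.inference_mode():
    logits = model(dummy)
assert logits.shape == (2, 11)

# The frozen backbone should leave only the replaced head trainable
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
assert all(name.startswith("heads") for name in trainable)
print("create_vit_model OK:", tuple(logits.shape), trainable)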
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==2.2.1
+ torchvision==0.17.1
+ gradio==4.28.3
vitb16_feature_extractor_weather_rcg.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b425ff809b0027fffa43c4bae5071376afc2ced56bc8f30e8a8060b9efba068a
+ size 343291310