VarunSivamani
committed on
Commit
•
5adf9b6
1
Parent(s):
0f6eb5a
Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio demo: YOLOv3 object detection with Grad-CAM visualization."""

import gradio as gr
import numpy as np
import torch  # previously reached only via `from utils import *`; used for torch.load / torch.inference_mode

import config
from utils import *
from pytorch_grad_cam.utils.image import show_cam_on_image
from YoloV3Lightning import YOLOv3LightningModel

# Example images shipped with the Space: examples/1000.jpg ... examples/1006.jpg
examples = [[f'examples/100{i}.jpg'] for i in range(7)]
scaled_anchors = config.scaled_anchors

# Load the trained checkpoint on CPU for inference only.
# strict=False tolerates missing/unexpected keys in the state dict.
model = YOLOv3LightningModel()
model.load_state_dict(torch.load("S13_model.pth", map_location="cpu"), strict=False)
model.eval()
@torch.inference_mode()
def yolo_predict(image, iou_thresh=0.5, thresh=0.5):
    """Run YOLOv3 on a single image and draw the detected boxes.

    Args:
        image: Input image as a numpy array (H, W, C) from the Gradio widget.
        iou_thresh: IoU threshold used by non-max suppression.
        thresh: Objectness/confidence threshold for keeping predictions.

    Returns:
        A copy of the input image with predicted bounding boxes drawn on it.
    """
    transformed_image = config.transforms(image=image)["image"].unsqueeze(0)
    output = model(transformed_image)

    # Gather candidate boxes from all three prediction scales.
    # The batch size is always 1 here (one uploaded image), so we keep a flat
    # list instead of the original one-element list-of-lists.
    bboxes = []
    for i in range(3):
        S = output[i].shape[2]  # grid size at this scale
        anchor = scaled_anchors[i]
        boxes_scale_i = cells_to_bboxes(output[i], anchor, S=S, is_preds=True)
        # cells_to_bboxes returns one list of boxes per batch item.
        bboxes += boxes_scale_i[0]

    nms_boxes = non_max_suppression(
        bboxes, iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
    )
    plot_img = draw_bounding_boxes(image.copy(), nms_boxes, class_labels=config.PASCAL_CLASSES)

    return plot_img
# Tab 1: object detection — upload an image, get it back with predicted boxes.
detection_inputs = [
    gr.Image(label="Input Image"),
    gr.Slider(0, 1, value=0.5, step=0.1, label="IOU Threshold", info="Define IOU Threshold value"),
    gr.Slider(0, 1, value=0.5, step=0.1, label="Threshold", info="Define Threshold value"),
]
detection_outputs = [gr.Image(label="YoloV3 Object Detection")]

tab1 = gr.Interface(
    yolo_predict,
    inputs=detection_inputs,
    outputs=detection_outputs,
    examples=examples,
)
def GradCAM(image, target_layer=-5, transparency=0.5, show_cam=True):
    """Overlay a Grad-CAM heatmap for the chosen network layer onto the image.

    Args:
        image: Input image as a numpy array from the Gradio widget.
        target_layer: Negative index into ``model.layers`` selecting the layer
            whose activations are visualized.
        transparency: Blend weight of the original image in the overlay.
        show_cam: When False, skip CAM computation and return the input as-is.

    Returns:
        The CAM overlay as an RGB numpy array, or the unmodified input image
        when show_cam is False.
    """
    if not show_cam:
        return image

    cam = YoloCAM(model=model, target_layers=[model.layers[target_layer]], use_cuda=False)
    net_input = config.transforms(image=image)["image"].unsqueeze(0)
    heatmap = cam(net_input, scaled_anchors)[0, :, :]

    # Match the network's 416x416 input resolution and scale pixels to [0, 1]
    # before blending with the heatmap.
    base = np.float32(cv2.resize(image, (416, 416))) / 255
    return show_cam_on_image(base, heatmap, use_rgb=True, image_weight=transparency)
65 |
+
|
66 |
+
tab2 = gr.Interface(
|
67 |
+
GradCAM,
|
68 |
+
inputs=[
|
69 |
+
gr.Image(label="Input Image"),
|
70 |
+
gr.Slider(-5, -2, value=-3, step=-1, label="Network Layer", info="Which layer GRAD-CAM do you want to visualize?"),
|
71 |
+
gr.Slider(0, 1, value=0.5, step=0.1, label="Transparency", info="Define Transparency of GRAD-CAMs"),
|
72 |
+
gr.Checkbox(label="GradCAM", value=True, info="Visualize Class Activation Maps?"),
|
73 |
+
],
|
74 |
+
outputs=[
|
75 |
+
gr.Image(label="GradCAM Visualization"),
|
76 |
+
],
|
77 |
+
examples=examples,
|
78 |
+
)
|
# Assemble both demos into one tabbed app and start the Gradio server.
interfaces = [tab1, tab2]
tab_titles = ["YOLOV3 Detection", "GradCAM Visualization"]

demo = gr.TabbedInterface(interfaces, tab_titles)
demo.launch()