# Detectron2 is not on PyPI; install it from source if it is missing.
try:
    import detectron2
except ImportError:
    import os
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

import cv2
import numpy as np
import requests
import torch
import gradio as gr

from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog


# Trained weights hosted on the Hugging Face Hub.
model_path = "https://huggingface.co/asalhi85/Smartathon-Detectron2/resolve/9f4d573340b033e651d4937906f23850f9b6bc57/phase2_detectron_model.pth"

# Build the Detectron2 config from the local Faster R-CNN X101-FPN 3x yaml
# and point it at the fine-tuned weights (11 classes).
cfg = get_cfg()
cfg.merge_from_file("./faster_rcnn_X_101_32x8d_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 11
cfg.MODEL.WEIGHTS = model_path

# Register class names so the Visualizer can label predictions.
my_metadata = MetadataCatalog.get("dbmdz_coco_all")
#my_metadata.thing_classes = ["GRAFFITI", "FADED_SIGNAGE","POTHOLES","GARBAGE","CONSTRUCTION_ROAD","BROKEN_SIGNAGE","BAD_STREETLIGHT","BAD_BILLBOARD","SAND_ON_ROAD","CLUTTER_SIDEWALK","UNKEPT_FACADE"]
my_metadata.thing_classes = ["None", "BAD_BILLBOARD","BROKEN_SIGNAGE","CLUTTER_SIDEWALK","CONSTRUCTION_ROAD","FADED_SIGNAGE","GARBAGE","GRAFFITI","POTHOLES","SAND_ON_ROAD","UNKEPT_FACADE"]

# smart_dict = {'GRAFFITI': 0.0, 'FADED_SIGNAGE': 1.0, 'POTHOLES': 2.0,
#               'GARBAGE': 3.0, 'CONSTRUCTION_ROAD': 4.0, 'BROKEN_SIGNAGE': 5.0,
#               'BAD_STREETLIGHT': 6.0, 'BAD_BILLBOARD': 7.0, 'SAND_ON_ROAD': 8.0,
#               'CLUTTER_SIDEWALK': 9.0, 'UNKEPT_FACADE': 10.0}
# Fall back to CPU inference when no GPU is available.
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"


def inference(image_url, image, min_score):
    # Prefer the URL if one is given; otherwise read the uploaded file.
    # The model expects BGR images, which is what cv2.imdecode/imread return.
    im = None
    if image_url:
        r = requests.get(image_url)
        if r.ok:
            im = np.frombuffer(r.content, dtype="uint8")
            im = cv2.imdecode(im, cv2.IMREAD_COLOR)
    if im is None and image:
        im = cv2.imread(image)
    if im is None:
        raise ValueError("Please provide a valid image URL or upload an image.")

    # Rebuild the predictor on every call so the score threshold from the
    # slider takes effect.
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(cfg)

    outputs = predictor(im)

    # The Visualizer expects RGB, so flip the channel order for display.
    v = Visualizer(im[:, :, ::-1], my_metadata, scale=1.2, instance_mode=ColorMode.IMAGE)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    return out.get_image()
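

# Quick local check (a minimal sketch, separate from the Gradio app below):
# inference() can be called directly with a file path. "sample.jpg", the None
# URL, and the 0.5 threshold are illustrative assumptions, not files or values
# shipped with this repo.
#
#     result = inference(None, "sample.jpg", 0.5)        # RGB numpy array
#     cv2.imwrite("prediction.png", result[:, :, ::-1])  # back to BGR for OpenCV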


title = "Smartathon Phase 2 Demo - Baseer"
description = "An interactive playground for our trained Detectron2 model: provide an image URL or upload an image, then adjust the minimum score to filter low-confidence detections."
article = '<p>The Detectron2 model is available in our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</p>'

# Gradio UI: a URL box, an image upload, and a score-threshold slider.
gr.Interface(
    inference,
    [gr.Textbox(label="Image URL", placeholder=""),
     gr.Image(type="filepath", image_mode="RGB", label="Input Image"),
     gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Minimum score"),
    ],
    gr.Image(type="numpy", label="Output"),
    title=title,
    description=description,
    article=article,
).launch()