File size: 6,065 Bytes
8e851a8
 
 
dcb538a
8e851a8
 
 
dcb538a
8e851a8
 
 
 
 
 
 
 
 
 
 
 
 
 
62ff382
8e851a8
 
66b3d7b
8e851a8
 
64b01d4
3d4b1ac
8e851a8
 
2481812
1402b62
 
 
66b3d7b
 
 
 
8e851a8
 
 
 
 
 
 
 
 
 
c7e2f5d
6d2fbd1
c7e2f5d
e2afd26
8e851a8
 
 
 
0ae9e18
8e851a8
e2afd26
8e851a8
 
 
 
 
649e06f
62ff382
 
8e851a8
4efca65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e5a63d9
4efca65
 
48a9237
a9b03c2
0a1cb88
 
 
8f56e3c
4efca65
85b5fba
4c88fee
0a1cb88
4c88fee
0a1cb88
4c88fee
734f6c3
 
85b5fba
99d4997
 
 
 
 
 
 
 
4efca65
 
 
4c88fee
4efca65
48a9237
 
4efca65
734f6c3
 
4c88fee
d5ae9c0
4c88fee
 
 
8f56e3c
4c88fee
4efca65
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# Ensure detectron2 is importable. It is not published on PyPI, so on first
# run (e.g. in a fresh Hugging Face Space) install it from GitHub.
try:
    import detectron2
except ImportError:
    import subprocess
    import sys
    # Use the current interpreter's pip (not whatever `pip` is on PATH) and
    # fail loudly if the install does not succeed, instead of continuing
    # with the dependency missing.
    subprocess.run(
        [sys.executable, "-m", "pip", "install",
         "git+https://github.com/facebookresearch/detectron2.git"],
        check=True,
    )
    import detectron2

import cv2
import torch
from matplotlib.pyplot import axis
import gradio as gr
import requests
import numpy as np
from torch import nn
import requests

import torch

from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode


# Fine-tuned Detectron2 weights hosted on the Hugging Face Hub (pinned to a
# specific revision so the demo stays reproducible).
model_path = "https://huggingface.co/asalhi85/Smartathon-Detectron2/resolve/9f4d573340b033e651d4937906f23850f9b6bc57/phase2_detectron_model.pth"

# Build the model configuration: Faster R-CNN with a ResNeXt-101 32x8d FPN
# backbone, loaded from a local yaml copy of the model-zoo config.
cfg = get_cfg()
cfg.merge_from_file("./faster_rcnn_X_101_32x8d_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 11  # 10 defect classes + a "None" placeholder at index 0
cfg.MODEL.WEIGHTS = model_path  # DefaultPredictor downloads the checkpoint from this URL

# Register class names under a dedicated dataset key so the Visualizer can
# label boxes. NOTE(review): the order below must match the label indices the
# model was trained with — the commented-out alternative suggests an earlier
# ordering existed; confirm against the training pipeline.
my_metadata = MetadataCatalog.get("dbmdz_coco_all")
#my_metadata.thing_classes = ["GRAFFITI", "FADED_SIGNAGE","POTHOLES","GARBAGE","CONSTRUCTION_ROAD","BROKEN_SIGNAGE","BAD_STREETLIGHT","BAD_BILLBOARD","SAND_ON_ROAD","CLUTTER_SIDEWALK","UNKEPT_FACADE"]
my_metadata.thing_classes = ["None", "BAD_BILLBOARD","BROKEN_SIGNAGE","CLUTTER_SIDEWALK","CONSTRUCTION_ROAD","FADED_SIGNAGE","GARBAGE","GRAFFITI","POTHOLES","SAND_ON_ROAD","UNKEPT_FACADE"]

# #smart_dict={'GRAFFITI' : 0.0 , 'FADED_SIGNAGE': 1.0 , 'POTHOLES': 2.0,
#             'GARBAGE' : 3.0 , 'CONSTRUCTION_ROAD': 4.0 , 'BROKEN_SIGNAGE': 5.0,
#             'BAD_STREETLIGHT' : 6.0 , 'BAD_BILLBOARD': 7.0 , 'SAND_ON_ROAD':8.0,
#             'CLUTTER_SIDEWALK' : 9.0 , 'UNKEPT_FACADE': 10.0}
# Fall back to CPU inference when no CUDA device is available (e.g. on a
# free Hugging Face Space).
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"


def inference(image_url, image, min_score):
    """Run the Detectron2 predictor on an image and return the visualization.

    Parameters
    ----------
    image_url : str
        Direct URL of an image; takes precedence over ``image`` when
        non-empty. Falls back to ``image`` if the download or decode fails.
    image : str
        Filesystem path of an uploaded image (Gradio ``type="filepath"``).
    min_score : float
        Minimum detection confidence; boxes below it are not drawn.

    Returns
    -------
    numpy.ndarray
        The input image with predicted instances drawn on it.

    Raises
    ------
    ValueError
        If no image could be loaded from either source.
    """
    im = None
    if image_url:
        try:
            # Timeout so a dead URL cannot hang the UI indefinitely.
            r = requests.get(image_url, timeout=30)
            if r.ok:
                buf = np.frombuffer(r.content, dtype="uint8")
                # May return None on undecodable content; handled below.
                im = cv2.imdecode(buf, cv2.IMREAD_COLOR)
        except requests.RequestException:
            im = None  # fall back to the uploaded file, if any
    if im is None and image:
        # cv2.imread yields BGR, which is what the model expects.
        im = cv2.imread(image)
    if im is None:
        # Original code raised a confusing NameError here when the URL
        # fetch failed; fail with an explicit message instead.
        raise ValueError("Could not load an image from the URL or the upload.")

    # Apply the per-request score threshold, then build a predictor for it.
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(cfg)

    outputs = predictor(im)

    # Visualizer expects RGB, so reverse the BGR channel order.
    v = Visualizer(im[:, :, ::-1], my_metadata, scale=1.2, instance_mode=ColorMode.IMAGE)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    return out.get_image()


# Static text shown on the Gradio page (title bar, header, footer link).
title = "Smartathon Phase2 Demo - Baseer"
description = "This demo introduces an interactive playground for our trained Detectron2 model."
article = '<p>Detectron model is available from our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</p>'

# gr.Interface(
#     inference,
#     [gr.inputs.Textbox(label="Image URL", placeholder=""),
#      gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image"),
#      gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score"),
#     ],
#     gr.outputs.Image(type="pil", label="Output"),
#     #gr.Examples(['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg'], inputs=gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image")),
#     title=title,
#     description=description,
#     article=article,
#     #examples=[['./d1.jpeg'], ['./d2.jpeg'], ['./d3.jpeg'],['./d4.jpeg'],['./d5.jpeg'],['./d6.jpeg']],
#     examples = gr.Examples(['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg'], inputs=gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image")),
#     cache_examples=False).launch()
#     #examples=['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg']


# Build the Gradio UI: a two-column layout with URL textbox, image upload and
# score slider on the left, and the annotated output image on the right.
with gr.Blocks(title=title,
                    css=".gradio-container {background:white;}"
               ) as demo:

    # Page header and usage instructions.
    gr.HTML("""<h4 style="font-weight:bold; text-align:center; color:navy;">"Smartathon Phase2 Demo - Baseer"</h4>""")
    # #
    #gr.HTML("""<h5 style="color:navy;">1- Select an example by clicking a thumbnail below.</h5>""")
    gr.HTML("""<h5 style="color:navy;">1- Select an example by clicking a thumbnail below.<br>
                                       2- Or upload an image by clicking on the canvas.<br>
                                       3- Or insert direct url of an image.</h5>""")

    with gr.Row():
        with gr.Column():
            #gr.HTML("""<h5 style="color:navy;">3- Or insert direct url of an image.</h5>""")
            # Input widgets; these are wired to inference() via send_btn.click
            # below, in the same order as inference's parameters.
            input_url = gr.Textbox(label="Image URL", placeholder="")
            #gr.HTML("""<h5 style="color:navy;">2- Or upload an image by clicking on the canvas.<br></h5>""")
            input_image = gr.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image")
            gr.HTML("""<h5 style="color:navy;">4- You can use this slider to control boxes min score: </h5>""")
            sliderr = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score")
        output_image = gr.Image(type="pil", label="Output")

    # gr.Interface(
    #     inference,
    #     [gr.inputs.Textbox(label="Image URL", placeholder=""),
    #      gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image"),
    #      gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score"),
    #     ],


    # Clickable example thumbnails that populate the upload widget.
    gr.Examples(['./d1.jpeg', './d2.jpeg', './d3.jpeg','./d4.jpeg','./d5.jpeg','./d6.jpeg'], inputs=input_image)

    #gr.HTML("""<br/>""")




    # Submit button: runs inference(input_url, input_image, sliderr) and
    # renders the result into output_image; exposed as API endpoint "find".
    gr.HTML("""<h5 style="color:navy;">5- Then, click "Submit" button to predict object instances. It will take about 15-20 seconds (on cpu)</h5>""")
    send_btn = gr.Button("Submit")
    send_btn.click(fn=inference, inputs=[input_url,input_image,sliderr], outputs=[output_image], api_name="find")

    #gr.HTML("""<h5 style="color:navy;">Reference</h5>""")
    #gr.HTML("""<ul>""")
    gr.HTML("""<h5 style="color:navy;">Detectron model is available from our repository <a href="https://github.com/asalhi/Smartathon-Baseer">here</a>.</h5>""")
    #gr.HTML("""</ul>""")


#demo.queue()
demo.launch() # debug=True)