try:
    import detectron2
except ImportError:
    import os
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

import cv2
import gradio as gr
import numpy as np
import requests
import torch

from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog

# Trained weights hosted on the Hugging Face Hub.
model_path = "https://huggingface.co/asalhi85/Smartathon-Detectron2/resolve/9f4d573340b033e651d4937906f23850f9b6bc57/phase2_detectron_model.pth"

cfg = get_cfg()
cfg.merge_from_file("./faster_rcnn_X_101_32x8d_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 11
cfg.MODEL.WEIGHTS = model_path

my_metadata = MetadataCatalog.get("dbmdz_coco_all")
my_metadata.thing_classes = ["None", "BAD_BILLBOARD", "BROKEN_SIGNAGE", "CLUTTER_SIDEWALK",
                             "CONSTRUCTION_ROAD", "FADED_SIGNAGE", "GARBAGE", "GRAFFITI",
                             "POTHOLES", "SAND_ON_ROAD", "UNKEPT_FACADE"]

# Label-to-id mapping, kept for reference:
# smart_dict = {'GRAFFITI': 0.0, 'FADED_SIGNAGE': 1.0, 'POTHOLES': 2.0,
#               'GARBAGE': 3.0, 'CONSTRUCTION_ROAD': 4.0, 'BROKEN_SIGNAGE': 5.0,
#               'BAD_STREETLIGHT': 6.0, 'BAD_BILLBOARD': 7.0, 'SAND_ON_ROAD': 8.0,
#               'CLUTTER_SIDEWALK': 9.0, 'UNKEPT_FACADE': 10.0}

# Fall back to CPU when no GPU is available.
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"


def inference(image_url, image, min_score):
    # Prefer the URL input when one is given; otherwise read the uploaded file.
    # OpenCV decodes to BGR, which is what the model expects.
    im = None
    if image_url:
        r = requests.get(image_url)
        if r.ok:
            im = np.frombuffer(r.content, dtype="uint8")
            im = cv2.imdecode(im, cv2.IMREAD_COLOR)
    if im is None:
        im = cv2.imread(image)

    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(cfg)
    outputs = predictor(im)

    # The Visualizer expects RGB, so reverse the BGR channel order.
    v = Visualizer(im[:, :, ::-1],
                   my_metadata,
                   scale=1.2,
                   instance_mode=ColorMode.IMAGE)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return out.get_image()


title = "Smartathon Phase2 Demo - Baseer"
description = "This demo introduces an interactive playground for our trained Detectron2 model."
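# A minimal smoke test, kept commented out. It assumes ./d1.jpeg (one of the
# example images referenced below) sits next to this script. Uncomment to check
# the predictor end to end before launching the UI:
# result = inference(image_url="", image="./d1.jpeg", min_score=0.4)
# cv2.imwrite("smoke_test.jpg", result[:, :, ::-1])  # get_image() is RGB; imwrite wants BGR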
article = "<p>Detectron model is available from our repository <a href='https://huggingface.co/asalhi85/Smartathon-Detectron2'>here</a>.</p>"
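# A sketch of the same demo using current top-level Gradio components
# (gr.Textbox / gr.Image / gr.Slider) in place of the deprecated gr.inputs.* /
# gr.outputs.* API used in the original, commented-out block below. Each example
# row supplies all three inputs (URL, file path, minimum score):
# gr.Interface(
#     fn=inference,
#     inputs=[gr.Textbox(label="Image URL"),
#             gr.Image(type="filepath", label="Input Image"),
#             gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score")],
#     outputs=gr.Image(type="numpy", label="Output"),
#     title=title,
#     description=description,
#     article=article,
#     examples=[["", "./d1.jpeg", 0.4], ["", "./d2.jpeg", 0.4], ["", "./d3.jpeg", 0.4],
#               ["", "./d4.jpeg", 0.4], ["", "./d5.jpeg", 0.4], ["", "./d6.jpeg", 0.4]],
#     cache_examples=False,
# ).launch()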
# Original gr.Interface version (deprecated gr.inputs.* / gr.outputs.* API),
# kept commented out:
# gr.Interface(
#     inference,
#     [gr.inputs.Textbox(label="Image URL", placeholder=""),
#      gr.inputs.Image(type="filepath", image_mode="RGB", source="upload", optional=False, label="Input Image"),
#      gr.Slider(minimum=0.0, maximum=1.0, value=0.4, label="Minimum score"),
#      ],
#     gr.outputs.Image(type="pil", label="Output"),
#     title=title,
#     description=description,
#     article=article,
#     examples=[['./d1.jpeg'], ['./d2.jpeg'], ['./d3.jpeg'], ['./d4.jpeg'], ['./d5.jpeg'], ['./d6.jpeg']],
#     cache_examples=False).launch()

with gr.Blocks(title=title, css=".gradio-container {background:white;}") as demo:
    gr.HTML("""