"""
building-segmentation
Proof of concept showing the effectiveness of a fine-tuned instance segmentation model for detecting buildings.
"""
import os
import cv2

# detectron2 is not published on PyPI, so install it from GitHub at runtime
# before it is imported below
os.system("pip install 'git+https://github.com/facebookresearch/detectron2.git'")

from transformers import DetrFeatureExtractor, DetrForSegmentation
from PIL import Image
import gradio as gr
import numpy as np
import torch
import torchvision
import detectron2

import itertools
import seaborn as sns

# common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.checkpoint import DetectionCheckpointer

# Build the Detectron2 config: start from the COCO Faster R-CNN R-101 FPN baseline,
# run inference on CPU, and load the fine-tuned building-detection weights
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file('COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml'))
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # keep predictions scoring at least 0.5
cfg.MODEL.WEIGHTS = "model_weights/chatswood_buildings_poc.pth"
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 8
predictor = DefaultPredictor(cfg)

def segment_buildings(im):
    # Gradio passes a PIL image in RGB; DefaultPredictor expects a BGR numpy array
    im_bgr = np.array(im)[:, :, ::-1]
    outputs = predictor(im_bgr)
    # Visualizer expects RGB, so flip back before drawing the predicted instances
    v = Visualizer(im_bgr[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    return Image.fromarray(out.get_image())

# gradio components (the confidence slider below is currently disabled; a sketch of wiring it in follows)
"""
gr_slider_confidence = gr.inputs.Slider(0,1,.1,.7,
                                        label='Set confidence threshold % for masks')
"""
# gradio input and output components
inputs = gr.inputs.Image(type="pil", label="Input Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

title = "Building Segmentation"
description = "An instance segmentation demo that identifies the boundaries of buildings in aerial images, using a Detectron2 Faster R-CNN model (R-101 FPN backbone) fine-tuned on building imagery"

# Create user interface and launch
gr.Interface(segment_buildings, 
                inputs = inputs,
                outputs = outputs,
                title = title,
                enable_queue = True,
                description = description).launch()