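"""Gradio demo app for Mask2Former (Masked-attention Mask Transformer).

Loads a Swin-L Mask2Former model trained for COCO panoptic segmentation and,
for each uploaded image, returns the panoptic, instance, and semantic
segmentation visualizations. Inference runs on CPU.
"""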
import os

# Install runtime dependencies that are not part of the base environment.
os.system("pip install imutils")
os.system("pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html")
os.system("pip install git+https://github.com/cocodataset/panopticapi.git")

import gradio as gr

# detectron2 setup
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import cv2
import torch

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
from detectron2.projects.deeplab import add_deeplab_config

# Class names and colors used to visualize COCO panoptic predictions
coco_metadata = MetadataCatalog.get("coco_2017_val_panoptic")

# import Mask2Former project
from mask2former import add_maskformer2_config

from PIL import Image
import imutils

# Build the Mask2Former config: Swin-L (IN21k) backbone, COCO panoptic weights, CPU-only inference.
cfg = get_cfg()
cfg.MODEL.DEVICE = 'cpu'
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
cfg.merge_from_file("configs/coco/panoptic-segmentation/swin/maskformer2_swin_large_IN21k_384_bs16_100ep.yaml")
cfg.MODEL.WEIGHTS = 'https://dl.fbaipublicfiles.com/maskformer/mask2former/coco/panoptic/maskformer2_swin_large_IN21k_384_bs16_100ep/model_final_f07440.pkl'
# Produce semantic, instance, and panoptic outputs in a single forward pass.
cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True
cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = True
cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = True
predictor = DefaultPredictor(cfg)
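
# With the three TEST flags above enabled, predictor(im) returns a dict with
# "panoptic_seg" (a (segmentation, segments_info) pair), "instances", and
# "sem_seg"; inference() below draws each of them separately.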

os.system("wget https://i.imgur.com/Vj17K5z.jpg")

def inference(img):
    im = cv2.imread(img)
    # Downscale so CPU inference stays reasonably fast.
    im = imutils.resize(im, width=512)
    outputs = predictor(im)

    # Visualizer draws onto its image in place, so create a fresh one per task;
    # im[:, :, ::-1] converts OpenCV's BGR to the RGB that Visualizer expects.
    v = Visualizer(im[:, :, ::-1], coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
    panoptic_seg, segments_info = outputs["panoptic_seg"]
    panoptic_result = v.draw_panoptic_seg(panoptic_seg.to("cpu"), segments_info).get_image()

    v = Visualizer(im[:, :, ::-1], coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
    instance_result = v.draw_instance_predictions(outputs["instances"].to("cpu")).get_image()

    v = Visualizer(im[:, :, ::-1], coco_metadata, scale=1.2, instance_mode=ColorMode.IMAGE_BW)
    semantic_result = v.draw_sem_seg(outputs["sem_seg"].argmax(0).to("cpu")).get_image()

    return (Image.fromarray(np.uint8(panoptic_result)).convert('RGB'),
            Image.fromarray(np.uint8(instance_result)).convert('RGB'),
            Image.fromarray(np.uint8(semantic_result)).convert('RGB'))
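
# Quick local sanity check (a sketch; assumes the example image above has been
# downloaded into the working directory):
#   panoptic, instance, semantic = inference("Vj17K5z.jpg")
#   panoptic.save("panoptic.png")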

title = "Mask2Former"
description = "Gradio demo for Mask2Former: Masked-attention Mask Transformer for Universal Image Segmentation. Upload an image, or click one of the examples to load it. Read more at the links below."

article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2112.01527' target='_blank'>Masked-attention Mask Transformer for Universal Image Segmentation</a> | <a href='https://github.com/facebookresearch/Mask2Former' target='_blank'>GitHub Repo</a></p>"

examples = [['Vj17K5z.jpg']]

gr.Interface(
    inference,
    inputs=gr.inputs.Image(type="filepath"),
    outputs=[
        gr.outputs.Image(label="Panoptic segmentation", type="pil"),
        gr.outputs.Image(label="Instance segmentation", type="pil"),
        gr.outputs.Image(label="Semantic segmentation", type="pil"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True, cache_examples=True)