import os

# Install Detectron2 at runtime from its GitHub source (assumes no prebuilt wheel is available in the demo environment).
os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

import cv2
import gradio as gr
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

from utils import add_bboxes

# print(torch.__version__, torch.cuda.is_available())
# assert torch.__version__.startswith("1.9") 

# Detectron2 config: CPU inference with a 0.5 score threshold for detections.
config_file = "config.yaml"
cfg = get_cfg()
cfg.merge_from_file(config_file)
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
# cfg.MODEL.WEIGHTS = "checkpoints_model_final_imagenet_40k_synthetic.pth"

def predict(model, img):
    """Run the selected checkpoint on the uploaded image and draw its detections."""
    if model == "40k synthetic":
        weights = "checkpoints_model_final_imagenet_40k_synthetic.pth"
    elif model == "100k synthetic":
        weights = "checkpoints_model_final_imagenet_100k_synthetic.pth"
    else:
        # Guard against an unexpected dropdown value instead of failing later with UnboundLocalError.
        raise ValueError(f"Unknown model: {model}")

    cfg.MODEL.WEIGHTS = weights
    predictor = DefaultPredictor(cfg)

    im = cv2.imread(img.name)
    output = predictor(im)
    instances = output["instances"]
    return add_bboxes(im, instances.pred_boxes, scores=instances.scores)
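
# Local smoke test (hypothetical: assumes the checkpoint files and example.jpg exist on disk;
# the old-style gradio "file" input passes an object whose .name attribute is the file path):
#   from types import SimpleNamespace
#   annotated = predict("40k synthetic", SimpleNamespace(name="example.jpg"))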

title = "Indoor Pet Detection"
description = "Detectron2 demo for indoor pet detection: choose one of the two synthetic-data checkpoints and upload an image."
examples = [
    ["40k synthetic", 'example.jpg'],
    ["100k synthetic", 'example.jpg'],
    ["40k synthetic", 'example-2.jpg'],
    ["100k synthetic", 'example-2.jpg']
]


gr.Interface(
    fn=predict,
    inputs=[
        gr.inputs.Dropdown(["40k synthetic", "100k synthetic"]),
        gr.inputs.Image(type="file"),
    ],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    # article=article,
    examples=examples,
    enable_queue=True,
).launch(debug=True)