# Reinstall mmcv-full pinned to 1.3.17, using the prebuilt CPU wheel for
# torch 1.10.0 so the compiled ops match the runtime environment.
import subprocess
import sys

print("Reinstalling mmcv")
subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "-y", "mmcv-full"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "mmcv-full==1.3.17",
                       "-f", "https://download.openmmlab.com/mmcv/dist/cpu/torch1.10.0/index.html"])
print("mmcv install complete")

# icevision.all provides models, tfms, ClassMap, and the COCO metrics used below.
from icevision.models.checkpoint import *
from icevision.all import *
from icevision.models import mmdet
import PIL
import gradio as gr

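# The 15 instrument classes the detector was trained on.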
classes = ['Army_navy', 'Bulldog', 'Castroviejo', 'Forceps', 'Frazier', 'Hemostat', 'Iris',
           'Mayo_metz', 'Needle', 'Potts', 'Richardson', 'Scalpel', 'Towel_clip', 'Weitlaner', 'Yankauer']
class_map = ClassMap(classes)


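# COCO-style bbox mAP metric; defined here for parity with training but not
# used at inference time.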
metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]


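# VFNet with a ResNet-50 FPN backbone (multi-scale training, 2x schedule),
# the architecture the checkpoint below was trained with.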
model_type = models.mmdet.vfnet
backbone = model_type.backbones.resnet50_fpn_mstrain_2x

checkpoint_path = 'VFNet_teacher_nov29_mAP82.6.pth'

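# Rebuild the model from the saved checkpoint; the returned dict also carries
# the image size the model was trained at.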
checkpoint_and_model = model_from_checkpoint(checkpoint_path)

model_loaded = checkpoint_and_model["model"]
img_size = checkpoint_and_model["img_size"]

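# Inference-time transforms: resize and pad to the training image size, then
# normalize, matching what the model saw during training.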
valid_tfms = tfms.A.Adapter(
    [*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()])
    

description1 = 'Tool for detecting 15 classes of surgical instruments: Scalpel, Forceps, Suture needle, Clamps (Hemostat, Towel clip, Bulldog), Scissors (Mayo_metz, Iris, Potts), Needle holder (Castroviejo), Retractors (Army-navy, Richardson, Weitlaner), Suctions (Yankauer, Frazier).'

description2 = '\n\nChoose one of the examples below or upload your own image of an instrument, click Submit, wait for the model to run, and view the predicted bounding boxes and/or labels.'

examples = [['Image00001.jpg'], ['Image00002.jpg'], ['Image00003.jpg'],
            ['Image00004.jpg'], ['Image00005.jpg']]

def show_preds_gradio(input_image, display_label, display_bbox, detection_threshold):
    """Run the detector on the uploaded image and return the annotated result."""

    # Guard against a zeroed slider, which would otherwise draw every
    # low-confidence detection.
    if detection_threshold == 0:
        detection_threshold = 0.5

    img = PIL.Image.fromarray(input_image, 'RGB')

    # end2end_detect applies valid_tfms, runs inference, filters detections by
    # the threshold, and (with return_img=True) draws them onto the image.
    pred_dict = model_type.end2end_detect(img, valid_tfms, model_loaded, class_map=class_map,
                                          detection_threshold=detection_threshold,
                                          display_label=display_label, display_bbox=display_bbox,
                                          return_img=True, font_size=16, label_color="#FF59D6")

    return pred_dict['img']
    
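# A minimal usage sketch for calling the predictor directly, outside of Gradio
# (the output file name is hypothetical; Image00001.jpg is one of the bundled
# examples):
#
#   import numpy as np
#   arr = np.array(PIL.Image.open("Image00001.jpg").convert("RGB"))
#   annotated = show_preds_gradio(arr, True, True, 0.5)
#   annotated.save("prediction.jpg")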

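# UI controls (legacy gr.inputs API): toggles for drawing labels/boxes and a
# slider for the detection confidence threshold.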
display_chkbox_label = gr.inputs.Checkbox(label="Label", default=True)
display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True)

detection_threshold_slider = gr.inputs.Slider(
    minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold")

outputs = gr.outputs.Image(type="pil")

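# Assemble the interface; enable_queue serializes prediction requests so
# concurrent users don't contend for the model.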
gr_interface = gr.Interface(fn=show_preds_gradio,
                            inputs=["image", display_chkbox_label, display_chkbox_box, detection_threshold_slider],
                            outputs=outputs,
                            title='Surgical Instrument Detection and Identification Tool',
                            description=description1 + description2,
                            examples=examples,
                            enable_queue=True)


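# debug=True streams errors to the console; share=True creates a public link
# when run locally (hosted platforms may ignore it).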
gr_interface.launch(inline=False, share=True, debug=True)