alibidaran committed
Commit 69140ae (parent: 0526480)

Update app.py: load the ONNX model, color the predicted mask with a per-class color table, and return a text list of detected instruments/tissues instead of a second image

Files changed (1): app.py (+93, -8)
app.py CHANGED
@@ -7,29 +7,114 @@ from torchvision import models,transforms
 core = Core()
 
 # Read model to OpenVINO Runtime
-model_ir = core.read_model(model="Davinci_eye.xml")
+model_ir = core.read_model(model="Davinci_eye.onnx")
 compiled_model_ir = core.compile_model(model=model_ir, device_name='CPU')
 
 tfms = transforms.Compose([
     transforms.ToTensor(),
     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet
     ])
+color_map={
+    (251,244,5): 1,
+    (37,250,5): 2,
+    (0,21,209): 3,
+    (172,21,2): 4,
+    (172,21,229): 5,
+    (6,254,249): 6,
+    (141,216,23): 7,
+    (96,13,13): 8,
+    (65,214,24): 9,
+    (124,3,252): 10,
+    (214,55,153): 11,
+    (48,61,173): 12,
+    (110,31,254): 13,
+    (249,37,14): 14,
+    (249,137,254): 15,
+    (34,255,113): 16,
+    (169,52,14): 17,
+    (124,49,176): 18,
+    (4,88,238): 19,
+    (115,214,178): 20,
+    (115,63,178): 21,
+    (115,214,235): 22,
+    (63,63,178): 23,
+    (130,34,26): 24,
+    (220,158,161): 25,
+    (201,117,56): 26,
+    (121,16,40): 27,
+    (15,126,0): 28,
+    (0,50,70): 29,
+    (20,20,0): 30,
+    (20,20,0): 31,
+}
+
+items = {
+    1: "HarmonicAce_Head",
+    2: "HarmonicAce_Body",
+    3: "MarylandBipolarForceps_Head",
+    4: "MarylandBipolarForceps_Wrist",
+    5: "MarylandBipolarForceps_Body",
+    6: "CadiereForceps_Head",
+    7: "CadiereForceps_Wrist",
+    8: "CadiereForceps_Body",
+    9: "CurvedAtraumaticGrasper_Head",
+    10: "CurvedAtraumaticGrasper_Body",
+    11: "Stapler_Head",
+    12: "Stapler_Body",
+    13: "MediumLargeClipApplier_Head",
+    14: "MediumLargeClipApplier_Wrist",
+    15: "MediumLargeClipApplier_Body",
+    16: "SmallClipApplier_Head",
+    17: "SmallClipApplier_Wrist",
+    18: "SmallClipApplier_Body",
+    19: "SuctionIrrigation",
+    20: "Needle",
+    21: "Endotip",
+    22: "Specimenbag",
+    23: "DrainTube",
+    24: "Liver",
+    25: "Stomach",
+    26: "Pancreas",
+    27: "Spleen",
+    28: "Gallbladder",
+    29: "Gauze",
+    30: "TheOther_Instruments",
+    31: "TheOther_Tissues",
+}
+
+colormap={v:[i for i in k] for k,v in color_map.items()}
+
+def convert_mask_to_rgb(pred_mask):
+    rgb_mask=np.zeros((pred_mask.shape[0],pred_mask.shape[1],3),dtype=np.uint8)
+    for k,v in colormap.items():
+        rgb_mask[pred_mask==k]=v
+    return rgb_mask
+
 
 def segment_image(filepath):
     image=cv2.imread(filepath)
     image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
     image = cv2.resize(image, (512,512))
-    x=tfms(image.copy()/255.)
-    #ort_input={ort_session.get_inputs()[0].name:x.cpu().unsqueeze(0).float().numpy()}
-    #out=ort_session.run(None,ort_input)
-    out = compiled_model_ir([x.unsqueeze(0).float().cpu().numpy()])
+    x=tfms(image.copy())
+    #ort_input={ort_session.get_inputs()[0].name:x.cpu().unsqueeze(0).float().numpy()}
+    #out=ort_session.run(None,ort_input)
+    out = compiled_model_ir(x.unsqueeze(0).float().cpu().numpy())
     pred_mask=np.squeeze(np.argmax(out[0],1)).astype(np.uint8)
-    color_mask=cv2.applyColorMap(pred_mask,cv2.COLORMAP_MAGMA)*10
+    color_mask=convert_mask_to_rgb(pred_mask)
     masked_image=cv2.addWeighted(image,0.6,color_mask,0.4,0.1)
-    return Image.fromarray(masked_image),Image.fromarray((color_mask))
+    pred_keys=pred_mask[np.nonzero(pred_mask)]
+    objects=[items[k] for k in pred_keys]
+    surgery_items=np.unique(np.array(objects),axis=0)
+    surg=""
+    for item in surgery_items:
+        surg+=item+","+" "
+    return Image.fromarray(masked_image),surg
 
 demo=gr.Interface(fn=segment_image,inputs=gr.Image(type='filepath'),
-                  outputs=[gr.Image(type="pil"),gr.Image(type="pil")],
+                  outputs=[gr.Image(type="pil"),gr.Text()],
                   examples=["R001_ch1_video_03_00-29-13-03.jpg",
                             "R002_ch1_video_01_01-07-25-19.jpg",
                             "R003_ch1_video_05_00-22-42-23.jpg",