alibidaran committed on
Commit
0a4e702
1 Parent(s): 7e019cf

Update app.py

Files changed (1)
  app.py +104 -14
app.py CHANGED
@@ -1,4 +1,4 @@
- from openvino.runtime import Core
+ #from openvino.runtime import Core
  import gradio as gr
  import numpy as np
  from PIL import Image
@@ -6,14 +6,102 @@ import cv2
  from torchvision import models,transforms
  from typing import Iterable
  import gradio as gr
+ import torch
+ from torch import nn
  from gradio.themes.base import Base
  from gradio.themes.utils import colors, fonts, sizes
  import time
- core = Core()
+ import intel_extension_for_pytorch as ipex

+ #core = Core()
+ def conv(in_channels, out_channels):
+     return nn.Sequential(
+         nn.ReflectionPad2d(1),
+         nn.Conv2d(in_channels, out_channels, kernel_size=3),
+         nn.BatchNorm2d(out_channels),
+         nn.ReLU(inplace=True)
+     )
+ class resconv(nn.Module):
+     def __init__(self,in_features,out_features):
+         super(resconv,self).__init__()
+         self.block=nn.Sequential(
+             nn.ReflectionPad2d(1),
+             nn.Conv2d(in_features,out_features,3),
+             nn.InstanceNorm2d(out_features),
+             nn.ReLU(inplace=True),
+             nn.ReflectionPad2d(1),
+             nn.Conv2d(out_features,out_features,3),
+             nn.InstanceNorm2d(out_features),
+             nn.ReLU(inplace=True),
+         )
+     def forward(self,x):
+         return x+self.block(x)
+
+ def up_conv(in_channels, out_channels):
+     return nn.Sequential(
+         nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
+         nn.BatchNorm2d(out_channels),
+         nn.ReLU(inplace=True)
+     )
+
+ class ResnUnet(nn.Module):
+     def __init__(self, out_channels=32,number_of_block=9):
+         super().__init__()
+         out_features=64
+         channels=3
+         model=[nn.ReflectionPad2d(3),nn.Conv2d(3,out_features,7),nn.InstanceNorm2d(out_features),
+                nn.ReLU(inplace=True),nn.MaxPool2d(3,stride=2)]
+         model+=[resconv(out_features,out_features)]
+         model+=[nn.Conv2d(out_features,out_features*2,3,stride=2,padding=1),nn.InstanceNorm2d(out_features),
+                 nn.ReLU(inplace=True)]
+         model+=[resconv(out_features*2,out_features*2)]
+         model+=[nn.Conv2d(out_features*2,out_features*4,3,stride=2,padding=1),nn.InstanceNorm2d(out_features),
+                 nn.ReLU(inplace=True)]
+         model+=[resconv(out_features*4,out_features*4)]
+         model+=[nn.Conv2d(out_features*4,out_features*8,3,stride=2,padding=1),nn.InstanceNorm2d(out_features),
+                 nn.ReLU(inplace=True)]
+         model+=[resconv(out_features*8,out_features*8)]
+         out_features*=8
+         input_features=out_features
+         for _ in range(4):
+             out_features//=2
+             model+=[
+                 nn.Upsample(scale_factor=2),
+                 nn.Conv2d(input_features,out_features,3,stride=1,padding=1),
+                 nn.InstanceNorm2d(out_features),
+                 nn.ReLU(inplace=True)
+             ]
+             input_features=out_features
+         model+=[nn.ReflectionPad2d(3),nn.Conv2d(32,32,7),
+                 ]
+         self.model=nn.Sequential(*model)
+     def forward(self,x):
+         return self.model(x)
+
+
+ model=ResnUnet().to('cpu')
+
+ # Load the state_dict
+ state_dict = torch.load('/content/real_model1_onnx_compat.pt',map_location='cpu')
+
+ # Create a new state_dict without the 'module.' prefix
+ new_state_dict = {}
+ for key, value in state_dict.items():
+     new_key = key.replace("module.", "") # Remove the 'module.' prefix
+     new_state_dict[new_key] = value
+
+ # Load the new state_dict into your model
+ model.load_state_dict(new_state_dict)
+ model.eval()
+
+ model = ipex.optimize(model, weights_prepack=False)
+
+ model = torch.compile(model, backend="ipex")
+
  # Read model to OpenVINO Runtime
- model_ir = core.read_model(model="Davinci_eye.onnx")
- compiled_model_ir = core.compile_model(model=model_ir, device_name='CPU')
+ #model_ir = core.read_model(model="Davinci_eye.onnx")
+ #compiled_model_ir = core.compile_model(model=model_ir, device_name='CPU')

  tfms = transforms.Compose([
      transforms.ToTensor(),
@@ -21,8 +109,8 @@ tfms = transforms.Compose([
  ])
  color_map={
      (251,244,5): 1,
-     (37,250,5):2,
-     (0,21,209):3,
+     (37,250,5):2,
+     (0,21,209):3,
      (172,21,2): 4,
      (172,21,229): 5,
      (6,254,249): 6,
@@ -52,7 +140,7 @@ color_map={
      (20,20,0):30,
      (20,20,0):31,
  }
-
+ colormap={v:[i for i in k] for k,v in color_map.items()}
  items = {
      1: "HarmonicAce_Head",
      2: "HarmonicAce_Body",
@@ -127,27 +215,29 @@ class Davinci_Eye(Base):

  davincieye = Davinci_Eye()

- colormap={v:[i for i in k] for k,v in color_map.items()}

  def convert_mask_to_rgb(pred_mask):
+
      rgb_mask=np.zeros((pred_mask.shape[0],pred_mask.shape[1],3),dtype=np.uint8)
      for k,v in colormap.items():
          rgb_mask[pred_mask==k]=v
      return rgb_mask


-
  def segment_image(filepath):
      image=cv2.imread(filepath)
      image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
-     image = cv2.resize(image, (512,512))
-     x=tfms(image.copy())
+     image = cv2.resize(image, (224,224))
+     x=tfms(image.copy()/255.)
+     with torch.no_grad():
+         mask=model(x.unsqueeze(0).float())
      #ort_input={ort_session.get_inputs()[0].name:x.cpu().unsqueeze(0).float().numpy()}
      #out=ort_session.run(None,ort_input)
-     out = compiled_model_ir(x.unsqueeze(0).float().cpu().numpy())
-     pred_mask=np.squeeze(np.argmax(out[0],1)).astype(np.uint8)
+     _,pred_mask=torch.max(mask,dim=1)
+     pred_mask=pred_mask[0].numpy()
+     pred_mask=pred_mask.astype(np.uint8)
      color_mask=convert_mask_to_rgb(pred_mask)
-     masked_image=cv2.addWeighted(image,0.6,color_mask,0.4,0.1)
+     masked_image=cv2.addWeighted(image,0.3,color_mask,0.8,0.2)
      pred_keys=pred_mask[np.nonzero(pred_mask)]
      objects=[items[k] for k in pred_keys]
      surgery_items=np.unique(np.array(objects),axis=0)
@@ -156,6 +246,7 @@ def segment_image(filepath):
          surg+=item+","+" "
      return Image.fromarray(masked_image),surg

+
  demo=gr.Interface(fn=segment_image,inputs=gr.Image(type='filepath'),
                    outputs=[gr.Image(type="pil"),gr.Text()],
                    examples=["R001_ch1_video_03_00-29-13-03.jpg",
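
With the 224x224 input that the new segment_image feeds it, ResnUnet's shapes line up: the stem's MaxPool2d(3, stride=2) and the three stride-2 convolutions shrink the map to 14x14, the four Upsample(scale_factor=2) stages bring it back to 224x224, and the closing ReflectionPad2d(3) + Conv2d(32, 32, 7) preserves that size, so the network emits a 32-channel logit map at full resolution (31 instrument classes plus background). A quick shape check, assuming the ResnUnet class from app.py above is in scope:

import torch

net = ResnUnet()                                # class defined in app.py above
net.eval()
with torch.no_grad():
    out = net(torch.randn(1, 3, 224, 224))     # (batch, channels, H, W)
print(out.shape)                                # torch.Size([1, 32, 224, 224])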
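
Checkpoints saved from a model wrapped in nn.DataParallel (or DistributedDataParallel) carry a "module." prefix on every parameter key, which load_state_dict on the bare ResnUnet would reject; that is what the key-rewriting loop before model.load_state_dict handles. A more compact equivalent, using a dict comprehension and str.removeprefix (Python 3.9+), with a hypothetical checkpoint path:

import torch

state_dict = torch.load("checkpoint.pt", map_location="cpu")   # hypothetical file
# Strip the leading "module." that DataParallel prepends to every key.
state_dict = {k.removeprefix("module."): v for k, v in state_dict.items()}

removeprefix only touches the start of each key, whereas key.replace("module.", "") would also rewrite a parameter whose name happened to contain "module." later on.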
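
The inference setup follows the usual Intel Extension for PyTorch CPU recipe: switch the model to eval mode, run it through ipex.optimize, then wrap it with torch.compile using the "ipex" backend; weights_prepack=False is the setting IPEX documents for the torch.compile path. A minimal sketch of the same recipe on a stand-in module (the tiny net here is illustrative, not the app's model):

import torch
from torch import nn
import intel_extension_for_pytorch as ipex

net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())  # stand-in model
net.eval()

net = ipex.optimize(net, weights_prepack=False)  # kernel-level CPU optimizations
net = torch.compile(net, backend="ipex")         # graph compilation via IPEX

with torch.no_grad():
    print(net(torch.randn(1, 3, 224, 224)).shape)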
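
color_map maps RGB triples to class ids; the added colormap = {v: [i for i in k] for k, v in color_map.items()} inverts it to id -> [R, G, B] (the inner comprehension is just list(k)) so convert_mask_to_rgb can paint each predicted class. One caveat: a Python dict literal keeps only the last of duplicate keys, so of the two (20,20,0) entries only (20,20,0): 31 survives, the inverted table has no entry for class 30, and class-30 pixels stay black in the overlay. A trimmed-down sketch of the inversion and the painting step, on an excerpt of the app's table:

import numpy as np

color_map = {(251, 244, 5): 1, (37, 250, 5): 2}        # excerpt, not the full table
colormap = {v: list(k) for k, v in color_map.items()}  # id -> [R, G, B]

pred_mask = np.array([[1, 0], [2, 1]], dtype=np.uint8)
rgb_mask = np.zeros((*pred_mask.shape, 3), dtype=np.uint8)
for class_id, rgb in colormap.items():
    rgb_mask[pred_mask == class_id] = rgb  # boolean mask selects pixels of this class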
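
Two details of the reworked segment_image are easy to misread. First, image.copy()/255. converts the uint8 frame to float64 in [0, 1], and transforms.ToTensor() rescales only uint8 inputs, so the model still sees the same [0, 1] range the old ToTensor-on-uint8 path produced. Second, torch.max(mask, dim=1) returns a (values, indices) pair whose indices are the per-pixel argmax over the class dimension, i.e. the predicted class ids, matching the old np.argmax(out[0], 1). A check with dummy logits:

import torch

logits = torch.randn(1, 32, 224, 224)  # dummy (batch, classes, H, W) output
_, pred = torch.max(logits, dim=1)     # indices = per-pixel class ids
assert torch.equal(pred, logits.argmax(dim=1))
print(pred.shape)                      # torch.Size([1, 224, 224])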
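
The overlay itself is a plain weighted blend: cv2.addWeighted(src1, alpha, src2, beta, gamma) computes src1*alpha + src2*beta + gamma per pixel with uint8 saturation, so moving from (0.6, 0.4, 0.1) to (0.3, 0.8, 0.2) simply makes the colored mask dominate the frame. The same arithmetic in NumPy, for reference:

import numpy as np

def blend(image, color_mask, alpha=0.3, beta=0.8, gamma=0.2):
    # Weighted sum plus offset, clipped to the uint8 range like cv2.addWeighted.
    out = image.astype(np.float64) * alpha + color_mask.astype(np.float64) * beta + gamma
    return np.clip(out, 0, 255).astype(np.uint8)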