bigmed@bigmed committed on
Commit 0a2ce36 • 1 parent: 727292a

initial code commit

Files changed (4)
  1. README.md +1 -1
  2. app.py +174 -0
  3. pipline.py +222 -0
  4. requirements.txt +6 -0
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Glaucoma Detection
-emoji: 🚀
+emoji: 👀
 colorFrom: blue
 colorTo: yellow
 sdk: gradio
app.py ADDED
@@ -0,0 +1,174 @@
import time

import cv2
import gradio as gr
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.nn import functional as F

from pipline import Transformer_Regression, extract_regions_Last, compute_ratios

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Model / preprocessing parameters
image_shape = 384  # input resolution; a 512 input was also tried (got 87)
batch_size = 1
dim_patch = 4
num_classes = 3
label_smoothing = 0.1
scale = 1

start = time.time()
torch.manual_seed(0)

# Normalize with mean 0.5 / std 0.5, matching the de-normalization at display time
tfms = transforms.Compose([
    transforms.Resize((image_shape, image_shape)),
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5),
])

def Final_Compute_regression_results_Sample(Model, batch_sampler, num_head=2):
    Model.eval()
    yreg_pred = []
    with torch.no_grad():
        train_batch_tfms = batch_sampler['image'].to(device=device)
        ytrue_seg = batch_sampler['image_original']
        scores = Model(train_batch_tfms.unsqueeze(0))

        # First pass: upsample the coarse segmentation to the original image size
        yseg_pred = F.interpolate(scores['seg'], size=(ytrue_seg.shape[0], ytrue_seg.shape[1]),
                                  mode='bilinear', align_corners=True)

        # Crop the optic-disc region located by the first pass
        Regions_crop = extract_regions_Last(np.array(batch_sampler['image_original']),
                                            yseg_pred.argmax(1).long()[0].detach().cpu().numpy())
        Regions_crop['image'] = Image.fromarray(np.uint8(Regions_crop['image'])).convert('RGB')

        ytrue_seg_crop = ytrue_seg[Regions_crop['cord'][0]:Regions_crop['cord'][1],
                                   Regions_crop['cord'][2]:Regions_crop['cord'][3]]
        ytrue_seg_crop = np.expand_dims(ytrue_seg_crop, axis=0)

        if num_head == 2:
            # Second pass: re-segment the cropped region with the auxiliary head
            # and paste the refined prediction back into the full-size map
            scores = Model((tfms(Regions_crop['image']).unsqueeze(0)).to(device))
            yseg_pred_crop = F.interpolate(scores['seg_aux_1'],
                                           size=(ytrue_seg_crop.shape[1], ytrue_seg_crop.shape[2]),
                                           mode='bilinear', align_corners=True)
            yseg_pred[:, :, Regions_crop['cord'][0]:Regions_crop['cord'][1],
                      Regions_crop['cord'][2]:Regions_crop['cord'][3]] = yseg_pred_crop

        yseg_pred = torch.softmax(yseg_pred, dim=1)
        yseg_pred = yseg_pred.argmax(1).long().detach().cpu().numpy()
        ratios = compute_ratios(yseg_pred[0])
        yreg_pred.append(ratios.vcdr)

        # Recover a displayable image (undo the 0.5/0.5 normalization) at prediction size
        p_img = batch_sampler['image'].to(device=device).unsqueeze(0)
        p_img = F.interpolate(p_img, size=(yseg_pred.shape[1], yseg_pred.shape[2]),
                              mode='bilinear', align_corners=True)
        image_orig = (p_img[0] * 0.5 + 0.5).permute(1, 2, 0).detach().cpu().numpy()
        image_orig = np.uint8(image_orig * 255)
        # Work on a copy, as cv2.drawContours draws in place
        image_cont = image_orig.copy()
        # Threshold at class 2 (cup) and draw its contour in green
        ret, thresh = cv2.threshold(np.uint8(yseg_pred[0]), 1, 2, 0)
        conts, hir = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(image_cont, conts, -1, (0, 255, 0), 2)
        # Threshold at class >= 1 (disc, including cup) and draw its contour in blue
        ret, thresh = cv2.threshold(np.uint8(yseg_pred[0]), 0, 2, 0)
        conts, hir = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(image_cont, conts, -1, (0, 0, 255), 2)

        glaucoma = 'not implemented'  # mapping the ratio to a diagnosis is not implemented yet

    return image_cont, ratios.vcdr, glaucoma, Regions_crop

# Load the model and its trained weights
DeepLab = Transformer_Regression(image_dim=image_shape, dim_patch=dim_patch, num_classes=3, scale=scale, feat_dim=128)
DeepLab.to(device=device)
# map_location lets the CUDA-trained checkpoint load on CPU-only machines
DeepLab.load_state_dict(torch.load("TrainAll_Maghrabi84_50iteration_SWIN.pth.tar", map_location=device))

def infer(img):
    sample_batch = dict()
    sample_batch['image_original'] = img
    sample_batch['image'] = tfms(Image.fromarray(img))

    result, ratio, diagnosis, cropped = Final_Compute_regression_results_Sample(DeepLab, sample_batch, num_head=2)

    # Zoom in on the disc with a 100 px margin, clamped so the slice start
    # cannot go negative (a negative start index would wrap around)
    r0 = max(cropped['cord'][0] - 100, 0)
    c0 = max(cropped['cord'][2] - 100, 0)
    cropped = result[r0:cropped['cord'][1] + 100, c0:cropped['cord'][3] + 100]

    return ratio, diagnosis, result, cropped


title = "Glaucoma detection"
description = "Using the vertical cup-to-disc ratio"

with gr.Blocks(css='#title {text-align: center;}') as demo:
    with gr.Row():
        gr.Markdown(
            f'''
            # {title}
            {description}
            ''',
            elem_id='title'
        )
    with gr.Row():
        with gr.Column():
            prompt = gr.Image(label="Enter Your Retina Image")
            btn = gr.Button(value='Submit')
            # outputs are only needed when caching examples, so none are passed here
            examples = gr.Examples(
                ['M00027.png', 'M00056.png', 'M00073.png', 'M00093.png', 'M00018.png', 'M00034.png'],
                inputs=[prompt], fn=infer, cache_examples=False)
        with gr.Column():
            with gr.Row():
                text1 = gr.Textbox(label="Vertical cup to disc ratio")
                text2 = gr.Textbox(label="Predicted diagnosis")
            img = gr.Image(label='Labeled image')
            zoom = gr.Image(label='Zoomed in')

    outputs = [text1, text2, img, zoom]
    btn.click(fn=infer, inputs=prompt, outputs=outputs)


if __name__ == '__main__':
    demo.launch()
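
For quick checks outside the web UI, `infer` can also be called directly. A minimal sketch, assuming the checkpoint and one of the bundled example images (e.g. M00027.png) sit next to the script:

# Minimal sketch: run the pipeline headlessly (no Gradio UI).
# Assumes TrainAll_Maghrabi84_50iteration_SWIN.pth.tar and M00027.png are present.
import numpy as np
from PIL import Image

from app import infer  # demo.launch() is guarded by __main__, so importing is safe

img = np.array(Image.open('M00027.png').convert('RGB'))
ratio, diagnosis, labeled, zoomed = infer(img)
print(f'vertical cup-to-disc ratio: {ratio:.3f}')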
pipline.py ADDED
@@ -0,0 +1,222 @@
# An implementation of a DeepLabV3+-style network for retina (optic disc/cup) segmentation
import torch
import torch.nn as nn
import torchvision
from torch.nn import functional as F
import numpy as np
import cv2
from skimage.measure import label, regionprops
from collections import namedtuple

# check you have the right version of timm
# assert timm.__version__ == "0.3.2"
from timm.models.swin_transformer import swin_base_patch4_window12_384_in22k

torch.manual_seed(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
pad_value = 10


def extract_regions_Last(img_test, ytruth, pad1=pad_value, pad2=pad_value, pad3=pad_value, pad4=pad_value):
    # Merge the cup label (2) into the disc label (1), then keep the largest
    # connected component as the optic-disc region
    y_truth_copy = ytruth.copy()
    y_truth_copy[y_truth_copy == 2] = 1
    label_img = label(y_truth_copy)

    regions = regionprops(label_img)
    max_Area = -1
    cropped_results = dict()
    for props in regions:
        if props.area > max_Area:
            max_Area = props.area
            minr, minc, maxr, maxc = props.bbox
            # Step each pad down (10 -> 5 -> 0) if it would leave the image bounds
            if minr - pad1 < 0:
                pad1 = 5
            if minr - pad1 < 0:
                pad1 = 0
            if minc - pad2 < 0:
                pad2 = 5
            if minc - pad2 < 0:
                pad2 = 0
            if maxr + pad3 > label_img.shape[0]:
                pad3 = 5
            if maxr + pad3 > label_img.shape[0]:
                pad3 = 0
            if maxc + pad4 > label_img.shape[1]:
                pad4 = 5
            if maxc + pad4 > label_img.shape[1]:
                pad4 = 0

            cropped_image = img_test[minr - pad1:maxr + pad3, minc - pad2:maxc + pad4, :]
            cropped_truth = ytruth[minr - pad1:maxr + pad3, minc - pad2:maxc + pad4]
            cropped_results['image'] = cropped_image
            cropped_results['truth'] = cropped_truth
            # cord = [top, bottom, left, right] of the padded crop in the full image
            cropped_results['cord'] = [minr - pad1, maxr + pad3, minc - pad2, maxc + pad4]

    return cropped_results


class BasicBlock(nn.Module):
    # 1x1 bottleneck -> 3x3 convolution, each with GroupNorm + GELU,
    # wrapped in a residual connection
    def __init__(self, channel_num):
        super(BasicBlock, self).__init__()
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(channel_num, 48, 1, padding=0),
            nn.GroupNorm(num_groups=8, num_channels=48),
            nn.GELU(),
        )
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(48, channel_num, 3, padding=1),
            nn.GroupNorm(num_groups=8, num_channels=channel_num),
            nn.GELU(),
        )
        self.relu = nn.GELU()

    def forward(self, x):
        residual = x
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = x + residual
        return x


class ASPP(nn.Module):
    # Despite the name, this head currently reduces to a bilinear upsampling
    # back to the input resolution; the residual block and pixel shuffle are unused
    def __init__(self, image_dim=384, head=1):
        super(ASPP, self).__init__()
        self.image_dim = image_dim
        self.Residual2 = BasicBlock(channel_num=head)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.head = head

    def forward(self, x):
        x21 = F.interpolate(x, size=(self.image_dim, self.image_dim), mode='bilinear',
                            align_corners=True)
        return x21


class Transformer_Regression(nn.Module):
    def __init__(self, image_dim=224, dim_patch=24, num_classes=3, scale=1, feat_dim=192):
        super(Transformer_Regression, self).__init__()
        self.backbone = swin_base_patch4_window12_384_in22k(pretrained=True)
        self.aux = 1
        self.dim_patch = dim_patch
        self.image_dim = image_dim
        self.num_classes = num_classes
        self.ASPP1 = ASPP(image_dim, head=128)
        self.ASPP2 = ASPP(image_dim, head=128)
        self.feat_dim = feat_dim
        # Main and auxiliary 3x3 classification heads
        self.Classifier_main = nn.Sequential(
            nn.Conv2d(128, self.num_classes, 3, bias=True, padding=1),
        )
        self.Classifier_aux1 = nn.Sequential(
            nn.Conv2d(128, self.num_classes, 3, bias=True, padding=1),
        )

        self.conv1 = nn.Sequential(nn.Conv2d(448, 128, kernel_size=(1, 1), padding=1), nn.GELU())
        self.pixelshufler1 = nn.PixelShuffle(2)
        self.pixelshufler2 = nn.PixelShuffle(4)

    def forward(self, x):
        hide1 = self.backbone(x)
        # Reshape the three Swin stages back into 2D feature maps:
        # 48x48x256, 24x24x512, and 12x12x1024 for a 384x384 input
        x1 = []
        x1.append(hide1[0][:, 0:].reshape(-1, 48, 48, 256))
        x1.append(hide1[1][:, 0:].reshape(-1, 24, 24, 512))
        x1.append(hide1[2][:, 0:].reshape(-1, 12, 12, 1024))
        for jk in range(len(x1)):
            x1[jk] = x1[jk].permute(0, 3, 1, 2)
        # Pixel shuffle brings the coarser stages up to 48x48 (channels / 4 and / 16)
        x1[1] = self.pixelshufler1(x1[1])
        x1[2] = self.pixelshufler2(x1[2])

        # Concatenate to 448 channels (256 + 128 + 64), then project down to 128
        x1[0] = torch.cat((x1[0], x1[1], x1[2]), 1)
        x1[0] = self.conv1(x1[0])

        Score = dict()
        x_main1 = self.ASPP1(x1[0])
        x_main = self.Classifier_main(x_main1)
        x_aux_1 = self.ASPP2(x1[0])
        x_aux_1 = self.Classifier_aux1(x_aux_1)

        Score['seg'] = x_main
        Score['seg_aux_1'] = x_aux_1

        return Score


Ratios = namedtuple("Ratios", 'cdr hcdr vcdr')
eps = np.finfo(np.float32).eps


def compute_ratios(mask_image):
    '''
    Given an input image containing the cup and disc masks, return a named
    tuple with the area, horizontal, and vertical cup-to-disc ratios.
    Input:
        mask_image: an image with values (0, 1, 2) for bg, disc, cup respectively
    Output:
        Ratios(cdr, hcdr, vcdr): a named tuple containing the computed ratios
    '''
    # NOTE: validation of (255, 128, 0)-valued masks was dropped; only
    # (0, 1, 2)-valued masks are handled here
    disc = np.uint8(mask_image > 0)  # the disc mask includes the cup
    cup = np.uint8(mask_image > 1)

    disc_area = np.sum(disc)
    cup_area = np.sum(cup)
    # Vertical and horizontal extent of the cup
    cup_vert = np.sum(cup, axis=0).max().astype(np.int32)
    cup_horz = np.sum(cup, axis=1).max().astype(np.int32)
    # Vertical and horizontal extent of the disc
    disc_vert = np.sum(disc, axis=0).max().astype(np.int32)
    disc_horz = np.sum(disc, axis=1).max().astype(np.int32)
    # Area cup-to-disc ratio; eps avoids division by zero
    cdr = (cup_area + eps) / (disc_area + eps)
    # Horizontal and vertical cup-to-disc ratios
    hcdr = (cup_horz + eps) / (disc_horz + eps)
    vcdr = (cup_vert + eps) / (disc_vert + eps)

    return Ratios(cdr, hcdr, vcdr)
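
To see what the two mask utilities produce, here is a minimal sketch on synthetic data; the image size and blob positions are made up purely for illustration:

# Minimal sketch: exercise extract_regions_Last and compute_ratios on a toy mask.
import numpy as np
from pipline import extract_regions_Last, compute_ratios

fake_img = np.zeros((100, 100, 3), dtype=np.uint8)   # stand-in fundus image
fake_mask = np.zeros((100, 100), dtype=np.uint8)     # 0=bg, 1=disc, 2=cup
fake_mask[30:70, 25:65] = 1                          # 40 px tall disc
fake_mask[45:55, 40:50] = 2                          # 10 px tall cup inside it

crop = extract_regions_Last(fake_img, fake_mask)
print(crop['cord'])              # [20, 80, 15, 75]: the bbox plus the 10 px pad

ratios = compute_ratios(fake_mask)
print(round(ratios.vcdr, 2))     # 0.25: cup height 10 over disc height 40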
requirements.txt ADDED
@@ -0,0 +1,6 @@
torch
timm
scikit-image  # provides the skimage import
numpy
opencv-python  # provides the cv2 import
Pillow
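
One way to sanity-check that the corrected package names resolve to the modules the code actually imports (torchvision arrives as a timm dependency, gradio is preinstalled on Spaces), a minimal sketch:

# Minimal sketch: confirm every runtime import used by app.py / pipline.py resolves.
import cv2
import gradio
import numpy
import PIL
import skimage
import timm
import torch
import torchvision

print('torch', torch.__version__, '| timm', timm.__version__)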