BK-V committed on
Commit
9885633
1 Parent(s): 00eda87

add app.py

Browse files
Files changed (1)
  1. app.py +146 -0
app.py ADDED
@@ -0,0 +1,146 @@
+ # Libraries
+ import streamlit as st
+ from tensorflow import keras
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+ from imutils import perspective
+ from scipy.spatial import distance as dist
+ import os
+
+ # Constants
+ MODEL_PATH = 'model.h5'
+ IMAGE_DIR = 'images'
+
+ # Functions
+ def load_image(image_file):
+     img = Image.open(image_file)
+     return img
+
+ def midpoint(ptA, ptB):
+     return ((ptA[0] + ptB[0]) / 2, (ptA[1] + ptB[1]) / 2)
+
+ def draw_dimensions(orig_image, predict_image, erode_iteration, open_iteration):
+     kernel1 = np.ones((5, 5), dtype=np.float32)
+     kernel_sharpening = np.array([[-1, -1, -1],
+                                   [-1,  9, -1],
+                                   [-1, -1, -1]])
+
+     image = predict_image
+     image2 = orig_image
+
+     # Clean up the predicted mask before measuring
+     image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel1, iterations=open_iteration)
+     image = cv2.filter2D(image, -1, kernel_sharpening)
+     image = cv2.erode(image, kernel1, iterations=erode_iteration)
+
+     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+     thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+     labels = cv2.connectedComponents(thresh, connectivity=8)[1]
+     for label in np.unique(labels):
+         if label == 0:
+             continue
+
+         # Create a mask for the current connected component
+         mask = np.zeros(thresh.shape, dtype="uint8")
+         mask[labels == label] = 255
+         # Find contours and determine contour area
+         cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+         cnts = cnts[0]
+         c_area = cv2.contourArea(cnts)
+
+         (x, y), radius = cv2.minEnclosingCircle(cnts)
+         rect = cv2.minAreaRect(cnts)
+         box = cv2.boxPoints(rect)
+         box = np.array(box, dtype="int")
+         box = perspective.order_points(box)
+         color1 = list(np.random.choice(range(150), size=3))
+         color = [int(color1[0]), int(color1[1]), int(color1[2])]
+         cv2.drawContours(image2, [box.astype("int")], 0, color, 2)
+         (tl, tr, br, bl) = box
+
+         # Compute the midpoints of the top/bottom and left/right box edges
+         (tltrX, tltrY) = midpoint(tl, tr)
+         (blbrX, blbrY) = midpoint(bl, br)
+         (tlblX, tlblY) = midpoint(tl, bl)
+         (trbrX, trbrY) = midpoint(tr, br)
+         # Draw the midpoints and the measurement lines on the image
+         cv2.circle(image2, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
+         cv2.line(image2, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), color, 2)
+         cv2.line(image2, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), color, 2)
+         dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
+         dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
+
+         # Convert pixel distances to millimeters (0.08 mm per pixel)
+         dimA = dA * 0.08
+         dimB = dB * 0.08
+         cv2.putText(image2, "{:.1f} millimeter".format(dimA), (int(tltrX - 10), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
+         cv2.putText(image2, "{:.1f} millimeter".format(dimB), (int(trbrX + 10), int(trbrY + 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
+
+     return image2
+
+ def segment_molar(image_file):
+     img = load_image(image_file)
+
+     img = np.asarray(img)
+     img = cv2.resize(img, (512, 256))
+     img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+     img = np.expand_dims(img, axis=0)
+
+     prediction = model.predict(img)
+
+     output = prediction.reshape(256, 512)
+
+     return np.squeeze(img, axis=0), output
+
+ def measure_molar(image_file):
+     img = load_image(image_file)
+
+     img = np.asarray(img)
+     img_prd = cv2.resize(img, (512, 256))
+     img_prd = cv2.cvtColor(img_prd, cv2.COLOR_RGB2GRAY)
+     img_prd = np.expand_dims(img_prd, axis=0)
+     prediction = model.predict(img_prd)
+     prediction = prediction * 255
+     prediction = prediction.astype("uint8")
+     prediction_img = prediction.reshape(256, 512)
+     # Stack the single-channel mask into a 3-channel image
+     img2 = np.zeros((prediction_img.shape[0], prediction_img.shape[1], 3))
+     img2[:, :, 0] = prediction_img
+     img2[:, :, 1] = prediction_img
+     img2[:, :, 2] = prediction_img
+     img2 = img2.astype("uint8")
+     predicted = cv2.resize(img2, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
+     output = draw_dimensions(img, predicted, 3, 2)
+
+     return output
+
+ # Load model and images
+ model = keras.models.load_model(MODEL_PATH)
+ images = os.listdir(IMAGE_DIR)
+ images = [f'{IMAGE_DIR}/{image}' for image in images]
+
+ # User interface
+ st.header("3rd Molar Segmentation")
+ st.subheader("Select Image:")
+ image_file = st.selectbox('Select Image', images)
+
+ if image_file is not None:
+     img, output1 = segment_molar(image_file)
+     output2 = measure_molar(image_file)
+
+     st.image(img, width=850)
+     st.image(output1, width=850)
+     st.image(output2, width=850)
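
For context on the measurement step added here: draw_dimensions measures each connected component of the predicted mask by taking the Euclidean distance between the midpoints of opposite edges of its minimum-area box and multiplying by the hard-coded 0.08 mm-per-pixel scale. A minimal sketch of that conversion, with made-up midpoint coordinates:

    from scipy.spatial import distance as dist

    MM_PER_PIXEL = 0.08  # scale hard-coded in draw_dimensions

    # Midpoints of two opposite box edges (example values only)
    top_mid = (120.0, 40.0)
    bottom_mid = (120.0, 290.0)

    length_px = dist.euclidean(top_mid, bottom_mid)  # 250.0 px
    length_mm = length_px * MM_PER_PIXEL             # 20.0 mm
    print("{:.1f} millimeter".format(length_mm))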