# Libraries
import streamlit as st
from tensorflow import keras
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
import matplotlib.pyplot as plt
from imutils import perspective
from scipy.spatial import distance as dist
import os

# Constants
MODEL_PATH = 'model.h5'
IMAGE_DIR = 'images'


# Functions
def load_image(image_file):
    """Open an image file with PIL."""
    img = Image.open(image_file)
    return img


def midpoint(ptA, ptB):
    """Return the midpoint between two (x, y) points."""
    return ((ptA[0] + ptB[0]) / 2, (ptA[1] + ptB[1]) / 2)


def draw_dimensions(orig_image, predict_image, erode_iteration, open_iteration):
    """Draw a rotated bounding box and its side lengths (in millimetres) for each
    connected component of the predicted mask onto the original image."""
    kernel1 = np.ones((5, 5), dtype=np.float32)
    kernel_sharpening = np.array([[-1, -1, -1],
                                  [-1,  9, -1],
                                  [-1, -1, -1]])
    image = predict_image
    image2 = orig_image

    # Clean up the predicted mask: opening, sharpening, erosion, then Otsu thresholding
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel1, iterations=open_iteration)
    image = cv2.filter2D(image, -1, kernel_sharpening)
    image = cv2.erode(image, kernel1, iterations=erode_iteration)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    # Label the connected components of the binary mask
    labels = cv2.connectedComponents(thresh, connectivity=8)[1]

    for label in np.unique(labels):
        if label == 0:
            continue  # skip the background

        # Create a mask for the current component
        mask = np.zeros(thresh.shape, dtype="uint8")
        mask[labels == label] = 255

        # Find contours; area and enclosing circle are computed but not used further
        cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]
        c_area = cv2.contourArea(cnts)
        (x, y), radius = cv2.minEnclosingCircle(cnts)

        # Fit a rotated bounding box and order its corners (tl, tr, br, bl)
        rect = cv2.minAreaRect(cnts)
        box = cv2.boxPoints(rect)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)

        # Draw the box in a random colour
        color1 = list(np.random.choice(range(150), size=3))
        color = [int(color1[0]), int(color1[1]), int(color1[2])]
        cv2.drawContours(image2, [box.astype("int")], 0, color, 2)

        (tl, tr, bl, br) = (box[0], box[1], box[3], box[2])
        (tl, tr, br, bl) = box
        # Midpoints of the top and bottom edges
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)
        # Midpoints of the left and right edges
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # Draw the midpoints and the lines connecting them
        cv2.circle(image2, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
        cv2.circle(image2, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
        cv2.circle(image2, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
        cv2.circle(image2, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
        cv2.line(image2, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), color, 2)
        cv2.line(image2, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), color, 2)

        # Euclidean distances in pixels, converted to millimetres (0.08 mm per pixel)
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        dimA = dA * 0.08
        dimB = dB * 0.08
        cv2.putText(image2, "{:.1f} millimeter".format(dimA), (int(tltrX - 10), int(tltrY - 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)
        cv2.putText(image2, "{:.1f} millimeter".format(dimB), (int(trbrX + 10), int(trbrY + 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 0), 2)

    return image2


def segment_molar(image_file):
    """Run the segmentation model on an image and return the resized input and the predicted mask."""
    img = load_image(image_file)
    img = np.asarray(img)
    img = cv2.resize(img, (512, 256))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img = np.expand_dims(img, axis=0)
    prediction = model.predict(img)
    output = prediction.reshape(256, 512)
    return img, output


def measure_molar(image_file):
    """Segment the image, then overlay bounding boxes and measurements on the original image."""
    img = load_image(image_file)
    img = np.asarray(img)
    img_prd = cv2.resize(img, (512, 256))
    img_prd = cv2.cvtColor(img_prd, cv2.COLOR_RGB2GRAY)
    img_prd = np.expand_dims(img_prd, axis=0)
    prediction = model.predict(img_prd)
    prediction = prediction * 255
    prediction = prediction.astype("uint8")
    prediction_img = prediction.reshape(256, 512)
    # Replicate the predicted mask into a 3-channel image so it can be processed like a colour image
    img2 = np.zeros((prediction_img.shape[0], prediction_img.shape[1], 3))
    img2[:, :, 0] = prediction_img
    img2[:, :, 1] = prediction_img
    img2[:, :, 2] = prediction_img
    img2 = img2.astype("uint8")

    # Resize the mask back to the original image size and draw the measurements on the original
    predicted = cv2.resize(img2, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
    output = draw_dimensions(img, predicted, 3, 2)
    return output


# Load model and images
model = keras.models.load_model(MODEL_PATH)
images = os.listdir(IMAGE_DIR)
images = [f'{IMAGE_DIR}/{image}' for image in images]

# Labels
labels = {'fatemeh_hafari_27022022_181119right.png': 'classI',
          'majid_mohammadkhanlu_19022022_175338right.png': 'mesioangular',
          'zahra_khodabandeh_17052022_162804right.png': 'mesioangular',
          '133028019739531250right.png': 'classII',
          'khadijeh_fard_aghababaei_03012022_120330right.png': 'B',
          'moharam_ali_khamseh_01112021_194329right.png': 'B',
          'parniya_mohamadinasab_08112021_184639right.png': 'B',
          'afsaneh_khalaji_24022022_183000right.png': 'B',
          'sogand_rahmani_13072022_113636right.png': 'B',
          'fatemeh_mirzaei_13072022_101734right.png': 'mesioangular',
          'fatemeh_hoseini_24012022_111330right.png': 'C',
          'padisar_ebrahimi_13122021_111512right.png': 'mesioangular'}

# UserInterface
st.header("3rdMolar Segmentation")
st.subheader("Select Image:")
image_file = st.selectbox('Select Image', images)

if image_file is not None:
    img, output1 = segment_molar(image_file)
    output2 = measure_molar(image_file)
    # Look up the label by file name; fall back to 'unknown' for unlabelled images
    st.subheader(f"Label : {labels.get(os.path.basename(image_file), 'unknown')}")
    st.image(img[0], width=850)
    st.image(output1, width=850)
    st.image(output2, width=850)
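
# To launch the app (assuming this script is saved as app.py, with model.h5 and the
# images/ directory next to it -- both names are taken from the constants above),
# use the Streamlit CLI:
#
#   streamlit run app.py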