file updated
- boneage_web.py +219 -0
- combined_model.py +47 -0
- enhance_image.py +35 -0
- img_extractor.py +167 -0
- inception_v3_cbam_arch.py +213 -0
- process_input_pipeline.py +45 -0
- resize_image_inception.py +35 -0
- resnet50_arch.py +182 -0
- testing_age.py +58 -0
- updated_resnet_1.py +181 -0
boneage_web.py
ADDED
@@ -0,0 +1,219 @@
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageOps
import random
import cv2
import tensorflow as tf
from tensorflow import keras
from keras.models import load_model

import time
# import mysql.connector

import process_input_pipeline as pp
import img_extractor
from process_input_pipeline import ImageResizer, ContrastEnhancer

# added for inception concat
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin

import combined_model

cm = combined_model.build_regression_model()
cm.load_weights(r"F:\python\web_streamlit\ALL_IN_ONE\model_weights\model_weights\combined_weights_7_epoch_third_acc.h5")
######################################################
roi = img_extractor.RoiExtractor()

# Connect to MySQL
# conn = mysql.connector.connect(
#     host="localhost",
#     user="root",
#     password="",
#     database="FeedbackCollection"
# )
# try:
#     if conn.is_connected:
#         # create cursor
#         cursor = conn.cursor()
#
#         # Create feedback table if it does not exist
#         cursor.execute('''CREATE TABLE IF NOT EXISTS feedback(
#                        id INT AUTO_INCREMENT PRIMARY KEY,
#                        name VARCHAR(255),
#                        feedback_type VARCHAR(255),
#                        feedback_message TEXT
#                        )''')
#
#         conn.commit()
# except mysql.connector.Error as e:
#     st.error(f"Error connecting to MySQL: {e}")


# from sklearn.metrics import mean_absolute_error

# Hide the Streamlit style, including the "Deploy" widget
# st.set_page_config(hide_streamlit_style=True)

# Sidebar: Navigation
st.sidebar.header('Navigation')

page = st.sidebar.selectbox('Select',
                            ['Learn About Bone Age',
                             'Bone Age Predictor',
                             'Resources', 'Feedback Form'])

# Education page
if page == 'Learn About Bone Age':
    st.title('About Bone Age')

    expander = st.expander('What is bone age?')
    expander.write("'Bone age is an interpretation of skeletal maturity, typically based on radiographs of the left hand and wrist or knee, that has provided useful information in various clinical settings for over 75 years.' (Creo AL, Schwenk WF. Bone age: a handy tool for pediatric providers. Pediatrics. 2017;140(6).)")
    '\n'
    expander = st.expander('What are current methods of determining bone age?')
    expander.write("""
    * Tanner-Whitehouse method: ...
    * Greulich-Pyle method: ...
    """)
    '\n'
    expander = st.expander('What are some clinical uses for bone age?')
    expander.write("""
    * Diagnosing certain growth (endocrinologic) conditions
    * Determining which patients would benefit from treatment
    * Monitoring treatment
    * Predicting adult height
    """)
    '\n'
    expander = st.expander('What are some non-clinical uses for bone age?')
    expander.write("""
    * Athletics
    * Forensics
    * Legal/policy
    """)

# Bone age prediction
# @st.cache_data
# def get_model(model_path):
#     try:
#         model = load_model(model_path)
#         return model
#     except Exception as e:
#         st.error(f"Error loading the model: {e}")
#         return None

# load the model
# model = get_model(r'F:\python\web_streamlit\ALL_IN_ONE\combined_weights_10_epoch_fourth_normalaize_1.h5')

# Function for processing the uploaded image
def process_image(img, img_size=(224, 224)):
    try:
        # image = ImageOps.fit(img, img_size)
        # image = np.asarray(image)
        # img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # img_resize = preprocess_input(img)
        # img_data = img_resize[np.newaxis, ...]

        # image_pipeline = make_pipeline(
        #     ImageResizer(),
        #     ContrastEnhancer()
        # )
        # img_data = image_pipeline.make_pipeline(img)

        new_size = (299, 299)
        resized_img = img.resize(new_size, Image.BICUBIC)
        img_array = np.array(resized_img)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(64, 64))
        cl1 = clahe.apply(img_array)
        cl2 = cv2.cvtColor(cl1, cv2.COLOR_GRAY2BGR)
        return cl2
    except Exception as e:
        st.error(f"Error processing image: {e}")
        return None


# Function for returning the prediction
def predict(img_data_c, img_data_m, gender):
    try:
        if gender == 'Female':
            # gender_input = np.array([0])
            gender_input = 0
        elif gender == 'Male':
            gender_input = 1

        pred = cm.predict([np.array([img_data_c]), np.array([img_data_m]), np.array([gender_input])])
        # pred = model.predict(img_data)
        return pred
    except Exception as e:
        st.error(f"Error predicting bone age: {e}")
        return None

if page == 'Bone Age Predictor':

    st.title('Bone Age Prediction')

    # Define accepted file formats
    accepted_formats = ("jpg", "jpeg", "png", "gif")

    # Upload image
    uploaded_file = st.file_uploader('Upload an image', type=accepted_formats)
    if uploaded_file is not None:
        # Adding progress bar
        progress_bar = st.progress(0)

        with st.spinner("Uploading..."):
            for i in range(100):
                time.sleep(0.1)
                progress_bar.progress(i + 1)

        st.success("Upload Complete")

        img = Image.open(uploaded_file)
        st.image(img, caption='Uploaded image', use_column_width=True)

        '\n'
        gender = st.radio('Sex:', ('Male', 'Female'))
        '\n'

        if st.button('Predict bone age'):
            st.write("Predicted bone age is:")
            img_data = process_image(img)

            # roi = img_extractor.RoiExtractor()
            roi.process_img(img_data)

            boneage = predict(roi.carpal_img, roi.metacarpal_img, gender)[0][0]
            print(boneage)
            # boneage = round(boneage, 1)
            st.write(boneage, 'months')


if page == 'Resources':
    st.title('Resources')

if page == "Feedback Form":
    st.title("Feedback Form")

    name = st.text_input("Name: ")

    feedback_type = st.selectbox("Feedback Type: ", ["General", "Bug Report", "Features"])

    feedback_message = st.text_area("Feedback Message:", "")

    if st.button("Submit Feedback"):
        # Insert feedback into the database
        # cursor.execute('''INSERT INTO feedback (name, feedback_type, feedback_message) VALUES (%s, %s, %s)''', (name, feedback_type, feedback_message))
        # conn.commit()
        st.success("Feedback Submitted")


# cursor.close()
# conn.close()
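A minimal end-to-end sketch of the same prediction path the 'Bone Age Predictor' page runs, assuming it executes in the same module where process_image, predict and roi are defined and that the hardcoded weight files exist; the image filename is a hypothetical placeholder.

# Illustrative sketch only; "sample_hand_xray.png" is a hypothetical file.
from PIL import Image

sample = Image.open("sample_hand_xray.png").convert("L")    # grayscale, as CLAHE expects a single channel
prepared = process_image(sample)                             # resize to 299x299, CLAHE, expand to 3 channels
roi.process_img(prepared)                                    # locate and crop carpal / metacarpal regions
months = predict(roi.carpal_img, roi.metacarpal_img, 'Male')[0][0]
print(f"Predicted bone age: {months:.1f} months")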
combined_model.py
ADDED
@@ -0,0 +1,47 @@
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Concatenate
from keras.utils import plot_model
import tensorflow as tf
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, Input, Flatten, Dropout
from keras.layers import Activation, Concatenate, Conv2D, Multiply
from keras.applications.resnet import ResNet50
import keras
from keras.applications.inception_v3 import preprocess_input
from updated_resnet_1 import ResNet50

def build_regression_model():
    base_model_carpal = ResNet50(input_shape=(224, 224, 3))
    base_model_carpal.load_weights(r'F:\python\web_streamlit\ALL_IN_ONE\model_weights\model_weights\resnet\carpal_weights_30_epoch_wd_2.h5')

    base_model_metacarpal = ResNet50(input_shape=(224, 224, 3))
    base_model_metacarpal.load_weights(r'F:\python\web_streamlit\ALL_IN_ONE\model_weights\model_weights\resnet\metacarpal_weights_30_epoch_wd_2.h5')

    gender_input = Input(shape=(1,), name='gender_input')
    for layer in base_model_carpal.layers:
        layer._name = 'carpal_' + layer.name
    for layer in base_model_metacarpal.layers:
        layer._name = 'metacarpal_' + layer.name

    for layer in base_model_carpal.layers[:-9]:
        layer.trainable = False
    for layer in base_model_metacarpal.layers[:-9]:
        layer.trainable = False

    x1 = base_model_carpal.layers[-4].output
    x2 = base_model_metacarpal.layers[-4].output

    # gender_weight = tf.Variable(initial_value=1.0, trainable=True, name='gender_weight')
    # gender_input_weighted = tf.multiply(gender_input, gender_weight)
    x = Concatenate()([x1, x2, gender_input])

    x = Dense(128, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    predictions = Dense(1)(x)

    model = Model(inputs=(base_model_carpal.input, base_model_metacarpal.input, gender_input), outputs=predictions)

    return model
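A brief usage sketch of build_regression_model(), mirroring how boneage_web.py batches the three inputs (two 224x224x3 crops plus a scalar gender flag); it assumes the ResNet weight files referenced above are available, and the zero arrays are placeholders.

# Illustrative sketch only; placeholder arrays stand in for real carpal/metacarpal crops.
import numpy as np
import combined_model

model = combined_model.build_regression_model()
carpal = np.zeros((1, 224, 224, 3), dtype=np.float32)       # placeholder carpal crop
metacarpal = np.zeros((1, 224, 224, 3), dtype=np.float32)   # placeholder metacarpal crop
gender = np.array([1])                                       # 1 = male, 0 = female (as in boneage_web.py)
pred_months = model.predict([carpal, metacarpal, gender])[0][0]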
enhance_image.py
ADDED
@@ -0,0 +1,35 @@
''' This program enhances the images using 'CLAHE':
    C - Contrast
    L - Limited
    A - Adaptive
    H - Histogram
    E - Equalization
'''
import cv2

def enhance_img(path1, path2, df):
    '''
    Reads images and enhances contrast using OpenCV.

    Parameters
    ----------
    path1: directory where images are stored
    path2: directory where enhanced images will be stored
    df: dataframe with a 'filename' column

    Returns
    ----------
    Nothing; writes the contrast-enhanced images to path2
    '''
    # Create list of filenames of training images
    filenames = list(df['filename'])

    # Iterate through each filename
    for filename in filenames:
        # Open & enhance images
        img = cv2.imread(path1 + filename, 0)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(64, 64))
        cl1 = clahe.apply(img)

        # Save the contrast-enhanced image
        cv2.imwrite(path2 + filename, cl1)
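A short usage sketch for enhance_img; the directory names and filenames below are hypothetical placeholders, and the dataframe only needs a 'filename' column.

# Illustrative sketch only; paths and filenames are hypothetical.
import pandas as pd
from enhance_image import enhance_img

df = pd.DataFrame({'filename': ['1377.png', '1378.png']})   # files expected inside path1
enhance_img('raw_images/', 'enhanced_images/', df)           # writes CLAHE-enhanced copies into path2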
img_extractor.py
ADDED
@@ -0,0 +1,167 @@
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Concatenate
from keras.utils import plot_model
import tensorflow as tf
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, Input, Flatten, Dropout
from keras.layers import Activation, Concatenate, Conv2D, Multiply
from keras.applications.inception_v3 import InceptionV3
import keras
from keras.applications.inception_v3 import preprocess_input
from keras import layers
from sklearn.model_selection import train_test_split
import pandas as pd

import numpy as np

class RoiExtractor:
    def __init__(self):
        # Model initialization
        self.model = build_image_model()
        self.model.load_weights(r"F:\python\web_streamlit\ALL_IN_ONE\model_weights\model_weights\inception\best_final_5.h5")
        self.heatmap_model = Model(inputs=self.model.inputs, outputs=self.model.layers[-3].output)

        # Outputs
        self.heatmap_1 = None
        self.carpal_img = None

        self.heatmap_2 = None
        self.metacarpal_img = None

        self.img = None
        self.masked_img = None

    def generate_heatmap(self, img):
        # Preprocessing
        i = preprocess_input(img)
        preprocessed_img = np.expand_dims(i, axis=0)

        cbam_output = self.heatmap_model.predict(preprocessed_img)[0]

        # Heatmap generation
        heatmap = np.sum(cbam_output, axis=-1)
        heatmap = (heatmap - np.min(heatmap)) / (np.max(heatmap) - np.min(heatmap))
        upsampled_heatmap = cv2.resize(heatmap, (299, 299))
        return upsampled_heatmap

    def get_bounding_box(self, heatmap, threshold=0.7):
        binary_mask = (heatmap > threshold).astype(np.uint8)
        non_zero_indices = np.nonzero(binary_mask)
        if len(non_zero_indices[0]) == 0 or len(non_zero_indices[1]) == 0:
            return None
        min_row, min_col = np.min(non_zero_indices[0]), np.min(non_zero_indices[1])
        max_row, max_col = np.max(non_zero_indices[0]), np.max(non_zero_indices[1])

        bounding_box = {
            'min_row': min_row,
            'min_col': min_col,
            'max_row': max_row,
            'max_col': max_col
        }
        return bounding_box

    def crop_image_by_bounding_box(self, image, bounding_box):
        min_row, min_col = bounding_box['min_row'], bounding_box['min_col']
        max_row, max_col = bounding_box['max_row'], bounding_box['max_col']
        cropped_image = image[min_row:max_row + 1, min_col:max_col + 1, :]
        cropped_resized = cv2.resize(cropped_image, (224, 224))
        return cropped_resized

    def apply_black_rectangle_by_bounding_box(self, image, bounding_box):
        min_row, min_col = bounding_box['min_row'], bounding_box['min_col']
        max_row, max_col = 299, bounding_box['max_col']
        test_img = image.copy()
        test_img = cv2.rectangle(test_img, (min_col, min_row), (max_col, max_row), (0, 0, 0), thickness=cv2.FILLED)
        return test_img

    def process_img(self, img):
        self.img = img
        self.heatmap_1 = self.generate_heatmap(img)
        bounding_box_1 = self.get_bounding_box(self.heatmap_1)
        self.carpal_img = self.crop_image_by_bounding_box(img, bounding_box_1)
        self.masked_img = self.apply_black_rectangle_by_bounding_box(img, bounding_box_1)
        self.heatmap_2 = self.generate_heatmap(self.masked_img)
        bounding_box_2 = self.get_bounding_box(self.heatmap_2)
        self.metacarpal_img = self.crop_image_by_bounding_box(img, bounding_box_2)


# Channel Attention Module
def channel_attention_module(x, ratio=8):
    batch, _, _, channel = x.shape
    # shared layers
    l1 = Dense(channel // ratio, activation="relu", use_bias=False)
    l2 = Dense(channel, use_bias=False)

    x1 = GlobalAveragePooling2D()(x)
    x1 = Flatten()(x1)
    x1 = l1(x1)
    x1 = l2(x1)

    x2 = GlobalMaxPooling2D()(x)
    x2 = Flatten()(x2)
    x2 = l1(x2)
    x2 = l2(x2)

    feats = x1 + x2
    feats = Activation("sigmoid")(feats)
    feats = Multiply()([x, feats])

    return feats

# Spatial Attention Module
def spatial_attention_module(x):
    # Average pooling
    x1 = tf.reduce_mean(x, axis=-1)
    x1 = tf.expand_dims(x1, axis=-1)

    # Max pooling
    x2 = tf.reduce_max(x, axis=-1)
    x2 = tf.expand_dims(x2, axis=-1)

    feats = Concatenate()([x1, x2])

    feats = Conv2D(1, kernel_size=7, padding="same", activation="softmax")(feats)
    feats = Multiply()([x, feats])

    return feats

# CBAM module
def cbam(x):
    x = channel_attention_module(x)
    x = spatial_attention_module(x)

    return x


def build_image_model():
    base_model = InceptionV3(
        weights='imagenet',
        include_top=False,
        input_shape=(299, 299, 3)
    )
    for layer in base_model.layers:
        layer.trainable = False
    x = cbam(base_model.output)

    x = GlobalAveragePooling2D()(x)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
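A minimal sketch of how RoiExtractor is driven (as boneage_web.py does), assuming the InceptionV3+CBAM weight file referenced in __init__ is present; a random array stands in for a preprocessed 299x299x3 radiograph.

# Illustrative sketch only; a random array replaces a real preprocessed radiograph.
import numpy as np
import img_extractor

roi = img_extractor.RoiExtractor()
fake_input = np.random.randint(0, 256, size=(299, 299, 3), dtype=np.uint8)
roi.process_img(fake_input)
print(roi.carpal_img.shape, roi.metacarpal_img.shape)   # both crops are resized to (224, 224, 3)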
inception_v3_cbam_arch.py
ADDED
@@ -0,0 +1,213 @@
# importing libraries
from keras.models import Model
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.utils import plot_model
from keras.layers import BatchNormalization
from keras.layers import AveragePooling2D
import tensorflow as tf
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, Input
from keras.layers import Activation, Concatenate, Conv2D, Multiply


Img_input = Input(shape=(299, 299, 3))
channel_axis = 3

# In the Inception V3 architecture, every convolution layer has batch normalization and a ReLU activation
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1)):
    x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding)(x)
    x = BatchNormalization(axis=3, scale=False)(x)
    x = Activation('relu')(x)
    return x

# Inception Block-A
def inc_block_a(x):
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch3x3 = conv2d_bn(x, 48, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 64, 3, 3)

    branch3x3db1 = conv2d_bn(x, 64, 1, 1)
    branch3x3db1 = conv2d_bn(branch3x3db1, 96, 3, 3)
    branch3x3db1 = conv2d_bn(branch3x3db1, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)

    x = Concatenate(axis=channel_axis)([branch1x1, branch3x3, branch3x3db1, branch_pool])
    return x

# Reduction Block-A
def reduction_block_a(x):
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3db1 = conv2d_bn(x, 64, 1, 1)
    branch3x3db1 = conv2d_bn(branch3x3db1, 96, 3, 3)
    branch3x3db1 = conv2d_bn(branch3x3db1, 96, 3, 3, strides=(2, 2), padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = Concatenate(axis=channel_axis)([branch3x3, branch3x3db1, branch_pool])

    return x

# Inception Block-B
def inc_block_b(x):
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7db1 = conv2d_bn(x, 128, 1, 1)
    branch7x7db1 = conv2d_bn(branch7x7db1, 128, 7, 1)
    branch7x7db1 = conv2d_bn(branch7x7db1, 128, 1, 7)
    branch7x7db1 = conv2d_bn(branch7x7db1, 128, 7, 1)
    branch7x7db1 = conv2d_bn(branch7x7db1, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)

    x = Concatenate(axis=channel_axis)([branch1x1, branch7x7, branch7x7db1, branch_pool])

    return x

# Reduction Block-B
def reduction_block_b(x):
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = Concatenate(axis=channel_axis)([branch3x3, branch7x7x3, branch_pool])

    return x

# Inception Block-C
def inc_block_c(x):
    branch1x1 = conv2d_bn(x, 320, 1, 1)

    branch3x3 = conv2d_bn(x, 384, 1, 1)
    branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
    branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
    branch3x3 = Concatenate(axis=channel_axis)([branch3x3_1, branch3x3_2])

    branch3x3db1 = conv2d_bn(x, 448, 1, 1)
    branch3x3db1 = conv2d_bn(branch3x3db1, 384, 3, 3)
    branch3x3db1_1 = conv2d_bn(branch3x3db1, 384, 1, 3)
    branch3x3db1_2 = conv2d_bn(branch3x3db1, 384, 3, 1)
    branch3x3db1 = Concatenate(axis=channel_axis)([branch3x3db1_1, branch3x3db1_2])

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)

    x = Concatenate(axis=channel_axis)([branch1x1, branch3x3, branch3x3db1, branch_pool])

    return x


# Channel Attention Module
def channel_attention_module(x, ratio=8):
    batch, _, _, channel = x.shape
    # shared layers
    l1 = Dense(channel // ratio, activation="relu", use_bias=False)
    l2 = Dense(channel, use_bias=False)

    x1 = GlobalAveragePooling2D()(x)
    x1 = l1(x1)
    x1 = l2(x1)

    x2 = GlobalMaxPooling2D()(x)
    x2 = l1(x2)
    x2 = l2(x2)

    feats = x1 + x2
    feats = Activation("sigmoid")(feats)
    feats = Multiply()([x, feats])

    return feats

# Spatial Attention Module
def spatial_attention_module(x):
    # Average pooling
    x1 = tf.reduce_mean(x, axis=-1)
    x1 = tf.expand_dims(x1, axis=-1)

    # Max pooling
    x2 = tf.reduce_max(x, axis=-1)
    x2 = tf.expand_dims(x2, axis=-1)

    feats = Concatenate()([x1, x2])

    feats = Conv2D(1, kernel_size=7, padding="same", activation="sigmoid")(feats)
    feats = Multiply()([x, feats])

    return feats

# CBAM (Convolutional Block Attention Module)
def cbam(x):
    x = channel_attention_module(x)
    x = spatial_attention_module(x)

    return x

# Building the model layer by layer, integrating CBAM
def inception_cbam_model(Img_input):
    x = conv2d_bn(Img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = inc_block_a(x)
    x = inc_block_a(x)
    x = inc_block_a(x)

    x = reduction_block_a(x)

    x = inc_block_b(x)
    x = inc_block_b(x)
    x = inc_block_b(x)
    x = inc_block_b(x)

    x = reduction_block_b(x)

    x = inc_block_c(x)
    x = inc_block_c(x)

    x = AveragePooling2D()(x)

    base_model = x

    model = cbam(base_model)

    # Add additional layers
    # model.add(layers.Flatten())
    # model.add(layers.Dense(256, activation='relu'))
    # model.add(layers.Dropout(0.5))
    # model.add(layers.Dense(1, activation='linear'))  # regression task for bone age
    # create model
    inputs = Img_input
    model = Model(inputs, model, name='inception_v3')

    return model

image_model = inception_cbam_model(Img_input)
print(image_model.summary())

# Compile the model
image_model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
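The commented-out head above is written in Sequential style while inception_cbam_model() is functional; a hedged sketch of an equivalent functional regression head (not part of this file) would be:

# Illustrative sketch: attach the commented-out regression head in functional form.
from keras.layers import Flatten, Dropout

features = image_model.output                       # CBAM-attended feature map
h = Flatten()(features)
h = Dense(256, activation='relu')(h)
h = Dropout(0.5)(h)
age_output = Dense(1, activation='linear')(h)       # regression target: bone age
regression_model = Model(image_model.input, age_output, name='inception_v3_cbam_regressor')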
process_input_pipeline.py
ADDED
@@ -0,0 +1,45 @@
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin
import cv2

# Define custom transformers for image processing steps
class ImageResizer(BaseEstimator, TransformerMixin):
    def __init__(self, target_size=(299, 299)):
        self.target_size = target_size

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        resized_images = [cv2.resize(img, self.target_size) for img in X]
        return resized_images

class ContrastEnhancer(BaseEstimator, TransformerMixin):
    def __init__(self, clip_limit=2.0, tile_grid_size=(8, 8)):
        self.clip_limit = clip_limit
        self.tile_grid_size = tile_grid_size

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        enhanced_images = [self._enhance_contrast(img) for img in X]
        return enhanced_images

    def _enhance_contrast(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=self.clip_limit, tileGridSize=self.tile_grid_size)
        enhanced = clahe.apply(gray)
        return cv2.cvtColor(enhanced, cv2.COLOR_GRAY2BGR)

# Define the pipeline

# image_pipeline = make_pipeline(
#     ImageResizer(),
#     ContrastEnhancer()
# )

# Example usage
# uploaded_image = cv2.imread()  # Load uploaded image
# enhanced_image = image_pipeline.transform([uploaded_image])[0]  # Apply pipeline
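A runnable version of the commented example above, assuming a dummy BGR array in place of a real upload (fit is a no-op for these stateless transformers).

# Illustrative sketch only; a random array stands in for an uploaded image.
import numpy as np

image_pipeline = make_pipeline(ImageResizer(), ContrastEnhancer())
dummy_upload = np.random.randint(0, 256, size=(512, 400, 3), dtype=np.uint8)
enhanced = image_pipeline.fit_transform([dummy_upload])[0]   # 299x299, CLAHE-enhanced, 3-channel
print(enhanced.shape)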
resize_image_inception.py
ADDED
@@ -0,0 +1,35 @@
# This program resizes the images into shape (299, 299)

from PIL import Image
import os

def resize_images(input_folder, output_folder, new_size=(224, 224)):
    # Create the output folder if it does not already exist
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # Loop through all files in the input folder
    for filename in os.listdir(input_folder):
        # Check if the file is a valid image file
        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):
            input_path = os.path.join(input_folder, filename)
            output_path = os.path.join(output_folder, filename)

            # Open and resize the image
            with Image.open(input_path) as img:
                resized_img = img.resize(new_size, Image.BICUBIC)

                # Save the resized image to the output folder
                resized_img.save(output_path)


if __name__ == "__main__":
    # Specify the input and output folders
    input_folder = "D:/dataset/boneage-test-dataset/boneage-validation-dataset/"
    output_folder = "D:/dataset/resize_image/boneage-validation-dataset/"

    # Specify the new size for the images
    new_size = (299, 299)

    # Resize the images in the input folder and save them to the output folder
    resize_images(input_folder, output_folder, new_size)
resnet50_arch.py
ADDED
@@ -0,0 +1,182 @@
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.utils import plot_model
from matplotlib.pyplot import imshow

# x is input, y = F(x)
# identity block simply means input should be equal to output.
# y = x + F(x)   the layers in a traditional network are learning the true output H(x)
# F(x) = y - x   the layers in a residual network are learning the residual F(x)
# Hence the name: Residual Block.


def identity_block(X, f, filters, stage, block):
    """
    Arguments:
    X -- input of shape (m, height, width, channel)
    f -- shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Saving the input value. We need this later to add to the output.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X


# CONVOLUTIONAL BLOCK
def convolutional_block(X, f, filters, stage, block, s=2):

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    # First layer
    X = Conv2D(F1, (1, 1), strides=(s, s), name=conv_name_base + '2a')(X)   # 1,1 is filter size
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)             # normalization on channels
    X = Activation('relu')(X)

    # Second layer, (f,f) = 3*3 filter by default
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third layer
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    ##### SHORTCUT PATH ####
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1')(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)

    # Final step: Add shortcut value here, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X

# CREATING RESNET50
# Each ResNet block is either 2 layers deep
def ResNet50(input_shape=(224, 224, 3)):
    """
    Implementation of the ResNet50 architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)   # 3,3 padding

    # Stage 1
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)   # 64 filters of 7*7
    X = BatchNormalization(axis=3, name='bn_conv1')(X)        # batchnorm applied on channels
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)               # window size is 3*3

    # Stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    ### START CODE HERE ###

    # Stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')

    # Stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')

    # Stage 5
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D((2, 2), name="avg_pool")(X)

    ### END CODE HERE ###

    # output layer
    X = Flatten()(X)

    X = Dense(1, activation='relu', name='output')(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model

model_resnet = ResNet50()
print(model_resnet.summary())
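A quick shape sanity-check sketch for the regression head defined above; with a 224x224x3 input the model emits one value per image (bone age in months).

# Illustrative sketch only; a zero batch is enough to confirm the output shape.
import numpy as np

m = ResNet50(input_shape=(224, 224, 3))
dummy_batch = np.zeros((1, 224, 224, 3), dtype=np.float32)
print(m.predict(dummy_batch).shape)   # (1, 1)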
testing_age.py
ADDED
@@ -0,0 +1,58 @@
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageOps
import random
import cv2
import tensorflow as tf
from tensorflow import keras
from keras.models import load_model

import time
# import mysql.connector

import process_input_pipeline as pp
import img_extractor
from process_input_pipeline import ImageResizer, ContrastEnhancer

# added for inception concat
from sklearn.pipeline import make_pipeline
from sklearn.base import BaseEstimator, TransformerMixin

import combined_model

cm = combined_model.build_regression_model()
cm.load_weights(r"F:\python\web_streamlit\ALL_IN_ONE\model_weights\model_weights\combined_weights_7_epoch_third_acc.h5")

roi = img_extractor.RoiExtractor()

def process_image(img, img_size=(224, 224)):
    try:
        new_size = (299, 299)
        resized_img = img.resize(new_size, Image.BICUBIC)
        img_array = np.array(resized_img)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(64, 64))
        cl1 = clahe.apply(img_array)
        cl2 = cv2.cvtColor(cl1, cv2.COLOR_GRAY2BGR)
        return cl2
    except Exception as e:
        st.error(f"Error processing image: {e}")
        return None

def predict(img_data_c, img_data_m, gender):
    try:
        if gender == 'Female':
            # gender_input = np.array([0])
            gender_input = 0
        elif gender == 'Male':
            gender_input = 1

        pred = cm.predict([np.array([img_data_c]), np.array([img_data_m]), np.array([gender_input])])
        # pred = model.predict(img_data)
        return pred
    except Exception as e:
        st.error(f"Error predicting bone age: {e}")
        return None
updated_resnet_1.py
ADDED
@@ -0,0 +1,181 @@
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
# from keras.utils import layer_utils
# from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
# from IPython.display import SVG
# from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model

import scipy.misc
from matplotlib.pyplot import imshow


# x is input, y = F(x)
# identity block simply means input should be equal to output.
# y = x + F(x)   the layers in a traditional network are learning the true output H(x)
# F(x) = y - x   the layers in a residual network are learning the residual F(x)
# Hence the name: Residual Block.


def identity_block(X, f, filters, stage, block):
    """
    Arguments:
    X -- input of shape (m, height, width, channel)
    f -- shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Saving the input value. We need this later to add to the output.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path (≈3 lines)
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path (≈2 lines)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X


def convolutional_block(X, f, filters, stage, block, s=2):

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value
    X_shortcut = X

    # First layer
    X = Conv2D(F1, (1, 1), strides=(s, s), name=conv_name_base + '2a')(X)   # 1,1 is filter size
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)             # normalization on channels
    X = Activation('relu')(X)

    # Second layer, (f,f) = 3*3 filter by default
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third layer
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    ##### SHORTCUT PATH ####
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1')(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)

    # Final step: Add shortcut value here, and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X

# Each ResNet block is either 2 layers deep
def ResNet50(input_shape=(64, 64, 3)):
    """
    Implementation of the ResNet50 architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)   # 3,3 padding

    # Stage 1
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)   # 64 filters of 7*7
    X = BatchNormalization(axis=3, name='bn_conv1')(X)        # batchnorm applied on channels
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)               # window size is 3*3

    # Stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    ### START CODE HERE ###

    # Stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')

    # Stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')

    # Stage 5
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')

    # AVGPOOL
    X = AveragePooling2D((2, 2), name="avg_pool")(X)

    ### END CODE HERE ###

    # output layer
    X = Flatten()(X)
    X = Dense(512, activation='relu')(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(1, name='output')(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model
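This is the ResNet50 variant that combined_model.py actually uses (its later import shadows the keras ResNet50), with a Dense(512) -> Dense(128) -> Dense(1) regression head; a short sketch of building it at the 224x224 resolution the combined model expects:

# Illustrative sketch only.
backbone = ResNet50(input_shape=(224, 224, 3))   # default (64, 64, 3) is overridden by combined_model.py
backbone.summary()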