RANA
committed on
Commit
•
7e3b597
1
Parent(s):
d46ed0a
u1
Browse files- Dockerfile +15 -0
- app.py +49 -0
- model/__pycache__/btd.cpython-39.pyc +0 -0
- model/btd.py +97 -0
- requirements.txt +0 -0
- weights/fused-model-weights.h5 +3 -0
Dockerfile
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Serve the brain-tumor-detection Flask app on port 7860.
FROM python:3.9

# Install the OpenCV native runtime dependency first: this layer changes
# rarely, so putting it before the pip layer keeps both cacheable.
# (Original ran apt-get after pip and never cleaned the apt lists.)
RUN apt-get update \
    && apt-get install -y --no-install-recommends libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Copy only the requirements first so code changes don't invalidate the
# pip layer.
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

# Fail the build early if the imaging stack is broken.
RUN python -c "import cv2; import numpy"

COPY . .

EXPOSE 7860

CMD ["flask", "run", "--host=0.0.0.0", "--port=7860"]
|
app.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, render_template, request
|
2 |
+
from model import btd
|
3 |
+
import numpy as np
|
4 |
+
import cv2
|
5 |
+
|
6 |
+
app = Flask(__name__)


@app.route("/", methods=["GET", "POST"])
def index():
    """Root endpoint: return a JSON listing of the API's available endpoints."""
    # NOTE(review): "fusion-predicition" is misspelled, but it matches the
    # registered route below, so the link works; renaming it would break
    # existing clients.
    endpoint_url = request.host_url + "fusion-predicition"
    payload = {
        "message": "Welcome to the API! Here are the available endpoints:",
        "endpoints": {
            "fused_prediction": endpoint_url,
        },
    }
    return payload, 200
|
19 |
+
|
20 |
+
@app.route("/fusion-predicition", methods=["GET", "POST"])
def test():
    """Run the fused tumor classifier on three uploaded MRI images.

    Expects a multipart POST with file fields ``img1``, ``img2`` and ``img3``.
    Returns a JSON object with the predicted tumor class, or a 400 error when
    files are missing or cannot be decoded. A GET returns a usage hint.
    """
    if request.method != "POST":
        # BUG FIX: the original returned None on GET, which made Flask raise
        # a 500 ("view function did not return a valid response").
        return {"message": "POST three image files as 'img1', 'img2' and 'img3'."}, 200

    if any(key not in request.files for key in ("img1", "img2", "img3")):
        return "No file uploaded for Tumor prediction", 400

    images = []
    for key in ("img1", "img2", "img3"):
        raw = request.files[key].read()
        img_array = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
        if img_array is None:
            # BUG FIX: imdecode returns None for empty/corrupt uploads; the
            # original passed None into the model and crashed with a 500.
            return f"Could not decode uploaded file '{key}'", 400
        images.append(img_array)

    prediction = btd.FusedFuctionModel(images[0], images[1], images[2])

    # Key spelling ("Predicition") preserved byte-for-byte for existing clients.
    return {
        "Tumor Predicition": prediction
    }
|
46 |
+
|
47 |
+
|
48 |
+
if __name__ == "__main__":
    # Local development entry point; the Docker image starts the app via the
    # Flask CLI instead (see Dockerfile CMD).
    app.run(debug=True)
|
model/__pycache__/btd.cpython-39.pyc
ADDED
Binary file (2.09 kB). View file
|
|
model/btd.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
from keras.models import Sequential , Model
|
4 |
+
from tensorflow.keras.applications.vgg19 import VGG19
|
5 |
+
from tensorflow.keras.applications.mobilenet import MobileNet
|
6 |
+
from keras.layers import Dense
|
7 |
+
from tensorflow.keras.applications.inception_v3 import InceptionV3
|
8 |
+
from keras.layers import Concatenate, Flatten
|
9 |
+
|
10 |
+
|
11 |
+
# Process-wide cache: the original rebuilt all three backbones and reloaded
# ~334 MB of weights on EVERY prediction call. Build lazily, once.
_FUSED_MODEL = None

# Class-index -> label mapping. The original compared str(predicted_classes)
# against '[0]'/'[1]'/'[2]' and left final_result unbound (NameError) for any
# other value.
_CLASS_NAMES = {
    0: "glioma",
    1: "meningioma",  # BUG FIX: original label was misspelled "meningimoa"
    2: "pituitary",
}

_INPUT_SIZE = (128, 128)


def _make_branch(backbone):
    """Wrap a frozen pretrained backbone with the shared dense head."""
    for layer in backbone.layers:
        layer.trainable = False  # inference only; weights come from the .h5 file

    branch = Sequential()
    branch.add(backbone)
    branch.add(Flatten())
    branch.add(Dense(1024, activation="relu"))
    branch.add(Dense(1024, activation="relu"))
    branch.add(Dense(512, activation="relu"))
    branch.add(Dense(128, activation="relu"))
    return branch


def _build_fused_model():
    """Assemble the 3-branch fusion classifier and load its trained weights."""
    input_shape = (128, 128, 3)
    inception_branch = _make_branch(
        InceptionV3(weights="imagenet", include_top=False, input_shape=input_shape))
    mobilenet_branch = _make_branch(
        MobileNet(weights="imagenet", include_top=False, input_shape=input_shape))
    vgg19_branch = _make_branch(
        VGG19(weights="imagenet", include_top=False, input_shape=input_shape))

    merged = Concatenate()([inception_branch.output,
                            mobilenet_branch.output,
                            vgg19_branch.output])
    output = Dense(3, activation="softmax")(merged)

    model = Model(inputs=[inception_branch.input,
                          mobilenet_branch.input,
                          vgg19_branch.input],
                  outputs=[output])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    # Path is relative to the process working directory, as in the original.
    model.load_weights("weights/fused-model-weights.h5")
    return model


def FusedFuctionModel(img1, img2, img3):
    """Classify a brain MRI from three image arrays using the fused model.

    Name kept as-is (including the "Fuction" typo) because app.py calls it.

    Args:
        img1, img2, img3: BGR image arrays (any size; resized to 128x128),
            fed to the InceptionV3, MobileNet and VGG19 branches respectively.
            # assumes 3-channel uint8 images as produced by cv2.imdecode —
            # TODO confirm against callers.

    Returns:
        The predicted class label: 'glioma', 'meningioma' or 'pituitary'.
    """
    global _FUSED_MODEL
    if _FUSED_MODEL is None:
        _FUSED_MODEL = _build_fused_model()

    # One-image "batch" per branch, resized to the backbones' input size.
    batches = [np.expand_dims(cv2.resize(img, _INPUT_SIZE), axis=0)
               for img in (img1, img2, img3)]

    predictions = _FUSED_MODEL.predict(batches)
    # argmax over the 3-way softmax is always in {0, 1, 2}.
    predicted_class = int(predictions.argmax(axis=-1)[0])
    return _CLASS_NAMES[predicted_class]
|
requirements.txt
ADDED
Binary file (336 Bytes). View file
|
|
weights/fused-model-weights.h5
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ac2b34465910c234805537a09b89637c575e263d013a3404327e4eec26037f9c
|
3 |
+
size 334668168
|