Update src/streamlit_app.py

src/streamlit_app.py  +15 -3  CHANGED
@@ -7,13 +7,19 @@ forums](https://discuss.streamlit.io).
 
 In the meantime, below is an example of what you can do with just a few lines of code:
 """
+
 import os
 
-#
-os.environ["
-os.environ["
+# Redirect Streamlit and Matplotlib config to temporary, writable directories
+os.environ["STREAMLIT_CONFIG_DIR"] = "/tmp/.streamlit"
+os.environ["MPLCONFIGDIR"] = "/tmp"
 
 import streamlit as st
+
+# Disable Streamlit usage stats and file watcher to prevent config writes
+st._config.set_option("browser.gatherUsageStats", False)
+st._config.set_option("server.fileWatcherType", "none")
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
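Note: the st._config.set_option calls added above rely on a private Streamlit API. A commonly used alternative is to express the same options as environment variables before streamlit is imported; the sketch below is an assumption, not part of this commit, and the STREAMLIT_<SECTION>_<OPTION> variable names should be checked against the Streamlit version pinned for this Space.

# Minimal sketch (assumption): the same settings via environment variables,
# set before streamlit/matplotlib are imported so they take effect at startup
# inside a container whose home directory is read-only.
import os

os.environ["MPLCONFIGDIR"] = "/tmp"                            # writable Matplotlib config/cache dir
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"   # browser.gatherUsageStats = false
os.environ["STREAMLIT_SERVER_FILE_WATCHER_TYPE"] = "none"      # server.fileWatcherType = "none"

import streamlit as st  # import only after the environment is prepared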
@@ -91,6 +97,7 @@ model.eval()
 
 # Label dictionary
 label_dict = {0: "Meningioma", 1: "Glioma", 2: "No Tumor", 3: "Pituitary"}
+
 # Preprocessing
 def preprocess_image(image):
     transform = transforms.Compose([
@@ -99,6 +106,7 @@ def preprocess_image(image):
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
     ])
     return transform(image).unsqueeze(0).to(device)
+
 # Grad-CAM
 def visualize_grad_cam(image, model, target_layer, label):
     img_np = np.array(image) / 255.0
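The normalization above uses the standard ImageNet statistics, so preprocess_image yields a (1, 3, 224, 224) tensor for an ImageNet-style backbone. A minimal usage sketch, assuming model, device and label_dict from earlier in the file and a hypothetical input path:

# Minimal sketch (assumption): classify one image with the helper shown above.
from PIL import Image

image = Image.open("example_mri.jpg").convert("RGB")   # hypothetical input file
input_tensor = preprocess_image(image)                 # (1, 3, 224, 224) on `device`

with torch.no_grad():                                  # inference only, no gradients
    probs = F.softmax(model(input_tensor), dim=1)

pred_idx = int(probs.argmax(dim=1).item())
print(label_dict[pred_idx], float(probs[0, pred_idx]))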
@@ -112,6 +120,7 @@ def visualize_grad_cam(image, model, target_layer, label):
     grayscale_cam_resized = cv2.resize(grayscale_cam, (224, 224))
     visualization = show_cam_on_image(img_np, grayscale_cam_resized, use_rgb=True)
     return visualization
+
 # LIME
 def model_predict(images):
     preprocessed_images = [preprocess_image(Img.fromarray(img)) for img in images]
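show_cam_on_image is the overlay utility from the pytorch-grad-cam package; the step that actually computes grayscale_cam falls outside the displayed hunks. A minimal sketch of how that step typically looks with the same package (recent API; the explainer configuration used in the file is not shown and is an assumption):

# Minimal sketch (assumption): produce the grayscale_cam used above with
# pytorch-grad-cam. `model` and `target_layer` are the arguments of
# visualize_grad_cam; `label` selects the class to explain.
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget

def grad_cam_heatmap(image, model, target_layer, label):
    input_tensor = preprocess_image(image)                      # (1, 3, 224, 224)
    cam = GradCAM(model=model, target_layers=[target_layer])    # hooks the chosen layer
    targets = [ClassifierOutputTarget(label)]                   # explain the requested class
    return cam(input_tensor=input_tensor, targets=targets)[0]   # (H, W) heatmap in [0, 1]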
@@ -120,6 +129,7 @@ def model_predict(images):
     logits = model(images_tensor)
     probabilities = F.softmax(logits, dim=1)
     return probabilities.cpu().numpy()
+
 def visualize_lime(image):
     explainer = LimeImageExplainer()
     original_image = np.array(image)
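model_predict is the classifier function LIME perturbs: it takes a batch of HxWx3 numpy arrays and returns an (N, num_classes) array of probabilities. The explain_instance call inside visualize_lime is not shown in these hunks; it is typically invoked roughly as below (the perturbation settings are assumptions):

# Minimal sketch (assumption): the hidden explain_instance call inside
# visualize_lime. `explainer` and `original_image` come from the lines above.
explanation = explainer.explain_instance(
    original_image,      # HxWx3 uint8 array from np.array(image)
    model_predict,       # batch of images -> (N, num_classes) probabilities
    top_labels=4,        # assumed: one entry per tumor class
    hide_color=0,        # assumed: hidden superpixels replaced with black
    num_samples=1000,    # assumed perturbation budget
)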
@@ -127,6 +137,7 @@ def visualize_lime(image):
     top_label = explanation.top_labels[0]
     temp, mask = explanation.get_image_and_mask(label=top_label, positive_only=True, num_features=10, hide_rest=False)
     return mark_boundaries(temp / 255.0, mask)
+
 # SHAP
 def visualize_shap(image):
     img_tensor = preprocess_image(image).to(device)
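The SHAP helper returns a Matplotlib figure; the construction of the explainer and the shap_values computation are outside these hunks. A rough sketch with shap.GradientExplainer, one common choice for PyTorch CNNs; the file may well use a different explainer or background set:

# Rough sketch (assumption): the hidden attribution step inside visualize_shap.
import shap

def shap_attributions(image):
    img_tensor = preprocess_image(image)                        # (1, 3, 224, 224) on `device`
    background = torch.zeros((1, 3, 224, 224), device=device)   # assumed trivial background batch
    explainer = shap.GradientExplainer(model, background)       # gradient-based explainer for the CNN
    return explainer.shap_values(img_tensor)                    # per-class attribution maps

A small sample of real training images is the usual background choice; the all-zeros batch here only keeps the sketch self-contained.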
@@ -147,6 +158,7 @@ def visualize_shap(image):
     ax.axis('off')
     plt.tight_layout()
     return fig
+
 # Streamlit UI
 st.title("Brain Tumor Classification with Grad-CAM, LIME, and SHAP")
 uploaded_file = st.file_uploader("Upload an MRI Image", type=["jpg", "png", "jpeg"])
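The UI hunk ends at the uploader; how uploaded_file is consumed lies outside the diff. A minimal sketch of the flow that typically ties the helpers above together, reusing the Img alias seen in model_predict; the widget captions, target_layer, and the exact layout are assumptions:

# Minimal sketch (assumption): typical handling of the uploaded file.
if uploaded_file is not None:
    image = Img.open(uploaded_file).convert("RGB")
    st.image(image, caption="Uploaded MRI", use_column_width=True)

    probs = model_predict([np.array(image)])[0]        # reuse the LIME wrapper for a prediction
    pred_idx = int(np.argmax(probs))
    st.write(f"Prediction: {label_dict[pred_idx]} ({probs[pred_idx]:.2%})")

    st.image(visualize_grad_cam(image, model, target_layer, pred_idx), caption="Grad-CAM")
    st.image(visualize_lime(image), caption="LIME", clamp=True)   # float RGB in [0, 1]
    st.pyplot(visualize_shap(image))                              # Matplotlib figure from the SHAP helper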