Update app.py
app.py CHANGED

@@ -106,12 +106,12 @@ class AttentionUNET(nn.Module):
         return self.final_conv(x)
 
 def download_model():
-    """Download
+    """Download trained model from HuggingFace"""
     model_url = "https://huggingface.co/spaces/ArchCoder/the-op-segmenter/resolve/main/best_attention_model.pth.tar"
     model_path = "best_attention_model.pth.tar"
 
     if not os.path.exists(model_path):
-        print("Downloading
+        print("Downloading trained model...")
         try:
             urllib.request.urlretrieve(model_url, model_path)
             print("Model downloaded successfully!")
@@ -123,39 +123,39 @@ def download_model():
 
     return model_path
 
-def
-    """Load
+def load_attention_model():
+    """Load trained Attention U-Net model"""
     global model
     if model is None:
         try:
-            print("Loading
+            print("Loading trained Attention U-Net model...")
 
             # Download model if needed
             model_path = download_model()
             if model_path is None:
                 return None
 
-            # Initialize
+            # Initialize model architecture
             model = AttentionUNET(in_channels=1, out_channels=1).to(device)
 
-            # Load
+            # Load trained weights
             checkpoint = torch.load(model_path, map_location=device, weights_only=True)
             model.load_state_dict(checkpoint["state_dict"])
             model.eval()
 
-            print("
+            print("Attention U-Net model loaded successfully!")
         except Exception as e:
-            print(f"Error loading
+            print(f"Error loading model: {e}")
             model = None
     return model
 
-def
-    """Preprocessing
-    # Convert to grayscale
+def preprocess_image(image):
+    """Preprocessing for model input"""
+    # Convert to grayscale
     if image.mode != 'L':
         image = image.convert('L')
 
-    #
+    # Apply transforms
     val_test_transform = transforms.Compose([
         transforms.Resize((256,256)),
         transforms.ToTensor()
@@ -164,33 +164,33 @@ def preprocess_for_your_model(image):
     return val_test_transform(image).unsqueeze(0)  # Add batch dimension
 
 def predict_tumor(image, mask=None):
-    current_model =
+    current_model = load_attention_model()
 
     if current_model is None:
-        return None, "Failed to load
+        return None, "Failed to load trained model."
     if image is None:
         return None, "Please upload an image first."
 
     try:
-        print("Processing with
+        print("Processing with PerceptNet Attention U-Net...")
 
-        #
-        input_tensor =
+        # Preprocess image
+        input_tensor = preprocess_image(image).to(device)
 
-        #
+        # Model prediction
         with torch.no_grad():
             pred_mask = torch.sigmoid(current_model(input_tensor))
             pred_mask_binary = (pred_mask > 0.5).float()
 
-        # Convert to numpy
+        # Convert to numpy
         pred_mask_np = pred_mask_binary.cpu().squeeze().numpy()
         prob_mask_np = pred_mask.cpu().squeeze().numpy()  # Probability for heatmap
         original_np = np.array(image.convert('L').resize((256, 256)))
 
-        # Create inverted mask for visualization
+        # Create inverted mask for visualization
        inv_pred_mask_np = np.where(pred_mask_np == 1, 0, 255)
 
-        # Create tumor-only image
+        # Create tumor-only image
         tumor_only = np.where(pred_mask_np == 1, original_np, 255)
 
         # Handle ground truth if provided
@@ -212,7 +212,7 @@ def predict_tumor(image, mask=None):
 
         # Create visualization (5-panel layout)
         fig, axes = plt.subplots(1, 5, figsize=(25, 5))
-        fig.suptitle('
+        fig.suptitle('PerceptNet Analysis Results', fontsize=16, fontweight='bold')
 
         titles = ["Original Image", "Ground Truth", "Predicted Mask", "Tumor Only", "Heatmap"]
         images = [original_np, mask_np if mask_np is not None else np.zeros_like(original_np), inv_pred_mask_np, tumor_only, prob_mask_np]
@@ -233,7 +233,7 @@ def predict_tumor(image, mask=None):
 
         result_image = Image.open(buf)
 
-        # Calculate statistics
+        # Calculate statistics
         tumor_pixels = np.sum(pred_mask_np)
         total_pixels = pred_mask_np.size
         tumor_percentage = (tumor_pixels / total_pixels) * 100
@@ -243,7 +243,7 @@ def predict_tumor(image, mask=None):
         mean_confidence = torch.mean(pred_mask).item()
 
         analysis_text = f"""
-##
+## PerceptNet Analysis Results
 ### Detection Summary:
 - **Status**: {'TUMOR DETECTED' if tumor_pixels > 50 else 'NO SIGNIFICANT TUMOR'}
 - **Tumor Area**: {tumor_percentage:.2f}% of brain region
@@ -256,35 +256,28 @@ def predict_tumor(image, mask=None):
 - **Dice Score**: {dice_score:.4f}
 - **IoU Score**: {iou_score:.4f}
 """
-        analysis_text += """
+        analysis_text += f"""
 ### Model Information:
-- **Architecture**:
+- **Architecture**: PerceptNet Attention U-Net
 - **Training Performance**: Dice: 0.8420, IoU: 0.7297
 - **Input**: Grayscale (single channel)
 - **Output**: Binary segmentation mask
 - **Device**: {device.type.upper()}
-### Model Performance:
-- **Training Accuracy**: 98.90%
-- **Best Dice Score**: 0.8420
-- **Best IoU Score**: 0.7297
-- **Training Dataset**: Brain tumor segmentation dataset
 ### Processing Details:
-- **Preprocessing**: Resize(256×256) + ToTensor
+- **Preprocessing**: Resize(256×256) + ToTensor
 - **Threshold**: 0.5 (sigmoid > 0.5)
 - **Architecture**: Attention gates + Skip connections
 - **Features**: [32, 64, 128, 256] channels
 ### Medical Disclaimer:
-This
+This AI model is for **research and educational purposes only**.
 Results should be validated by medical professionals. Not for clinical diagnosis.
-### Model Quality:
-This is your own trained model with {tumor_percentage:.2f}% detection capability!
 """
 
-        print(f"
+        print(f"Model analysis completed! Tumor area: {tumor_percentage:.2f}%")
         return result_image, analysis_text
 
     except Exception as e:
-        error_msg = f"Error with
+        error_msg = f"Error with model: {str(e)}"
         print(error_msg)
         return None, error_msg
 
@@ -293,113 +286,210 @@ def load_random_sample():
         return None, None, "Dataset not available."
     rand_idx = random.randint(0, len(test_imgs) - 1)
     img_path = os.path.join(image_path, test_imgs[rand_idx])
-    msk_path = os.path.join(mask_path, test_masks[rand_idx])
+    msk_path = os.path.join(mask_path, test_masks[rand_idx])
     image = Image.open(img_path).convert('L')
     mask = Image.open(msk_path).convert('L')
     return image, mask, "Loaded random sample from dataset."
 
 def clear_all():
-    return None, None, "Upload a brain MRI image to test
+    return None, None, "Upload a brain MRI image to test PerceptNet model", None
 
-#
+# Professional CSS styling
 css = """
 .gradio-container {
-    max-width:
+    max-width: 1600px !important;
     margin: auto !important;
+    background-color: #ffffff !important;
+    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif !important;
 }
-
-
-    background: linear-gradient(135deg, #
+
+#title-header {
+    background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%);
     color: white;
+    padding: 40px 30px;
+    border-radius: 12px;
+    margin-bottom: 30px;
+    box-shadow: 0 4px 20px rgba(37, 99, 235, 0.15);
+    text-align: center;
+}
+
+.main-container {
+    background-color: #ffffff;
+    border-radius: 12px;
+    box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
     padding: 30px;
-
-
-
+    margin-bottom: 20px;
+}
+
+.input-section {
+    background-color: #f8fafc;
+    border: 1px solid #e2e8f0;
+    border-radius: 8px;
+    padding: 25px;
+}
+
+.info-panel {
+    background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);
+    border: 1px solid #0ea5e9;
+    border-radius: 8px;
+    padding: 20px;
+    margin-top: 20px;
+}
+
+.footer-section {
+    background-color: #f8fafc;
+    border: 1px solid #e2e8f0;
+    border-radius: 12px;
+    padding: 30px;
+    margin-top: 30px;
+}
+
+.stat-grid {
+    display: grid;
+    grid-template-columns: 1fr 1fr;
+    gap: 30px;
+    margin: 20px 0;
+}
+
+.disclaimer-text {
+    color: #dc2626;
+    font-weight: 600;
+    line-height: 1.5;
+    background-color: #fef2f2;
+    padding: 15px;
+    border-radius: 6px;
+    border: 1px solid #fecaca;
+}
+
+h1, h2, h3, h4 {
+    color: #1e293b !important;
+}
+
+.gr-button-primary {
+    background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%) !important;
+    border: none !important;
+    color: white !important;
+    font-weight: 600 !important;
+    padding: 12px 24px !important;
+    border-radius: 8px !important;
+    transition: all 0.2s ease !important;
+}
+
+.gr-button-primary:hover {
+    transform: translateY(-1px) !important;
+    box-shadow: 0 4px 12px rgba(37, 99, 235, 0.3) !important;
+}
+
+.gr-button-secondary {
+    background: #6b7280 !important;
+    border: none !important;
+    color: white !important;
+    font-weight: 600 !important;
+    padding: 12px 24px !important;
+    border-radius: 8px !important;
 }
 """
 
-# Create Gradio interface
-with gr.Blocks(css=css, title="
+# Create Gradio interface
+with gr.Blocks(css=css, title="PerceptNet - Brain Tumor Segmentation", theme=gr.themes.Default()) as app:
 
     gr.HTML("""
-    <div id="title">
-        <h1
-        <p style="font-size:
-
+    <div id="title-header">
+        <h1 style="margin: 0; font-size: 2.5rem; font-weight: 700;">PerceptNet</h1>
+        <p style="font-size: 1.2rem; margin: 15px 0 5px 0; opacity: 0.95;">
+            Advanced Brain Tumor Segmentation System
         </p>
-        <p style="font-size:
-
+        <p style="font-size: 1rem; margin: 5px 0 0 0; opacity: 0.8;">
+            Attention U-Net Architecture • Dice: 0.8420 • IoU: 0.7297
         </p>
     </div>
    """)
 
     mask_state = gr.State(None)
 
-    with gr.Row():
-        with gr.Column(scale=1):
-            gr.Markdown("### Upload Brain MRI")
+    with gr.Row(elem_classes="main-container"):
+        with gr.Column(scale=1, elem_classes="input-section"):
+            gr.Markdown("### Upload Brain MRI Scan", elem_classes="section-title")
 
             image_input = gr.Image(
-                label="Brain MRI
+                label="Brain MRI Image",
                 type="pil",
                 sources=["upload", "webcam"],
-                height=
+                height=380
             )
 
             with gr.Row():
-                analyze_btn = gr.Button(
-
-
+                analyze_btn = gr.Button(
+                    "Analyze Image",
+                    variant="primary",
+                    scale=2,
+                    size="lg"
+                )
+                random_btn = gr.Button(
+                    "Load Sample",
+                    variant="secondary",
+                    scale=1,
+                    size="lg"
+                )
+                clear_btn = gr.Button(
+                    "Clear",
+                    variant="secondary",
+                    scale=1
+                )
 
             gr.HTML("""
-            <div
-                <h4 style="color: #
-                <
-                <
-                <
-                <
-                <
-                <
-            </
+            <div class="info-panel">
+                <h4 style="color: #0ea5e9; margin-bottom: 15px; font-size: 1.1rem;">Model Specifications</h4>
+                <div style="line-height: 1.8; font-size: 0.95rem;">
+                    <div><strong>Architecture:</strong> Attention U-Net with Skip Connections</div>
+                    <div><strong>Performance:</strong> 84.2% Dice Score, 72.97% IoU</div>
+                    <div><strong>Input Format:</strong> Grayscale MRI Scans (256×256)</div>
+                    <div><strong>Output:</strong> Binary Segmentation + Confidence Heatmap</div>
+                    <div><strong>Features:</strong> Attention Mechanisms, Multi-scale Analysis</div>
+                </div>
             </div>
             """)
 
         with gr.Column(scale=2):
-            gr.Markdown("###
+            gr.Markdown("### Analysis Results", elem_classes="section-title")
 
             output_image = gr.Image(
-                label="
+                label="PerceptNet Analysis Output",
                 type="pil",
-                height=
+                height=520
             )
 
             analysis_output = gr.Markdown(
-                value="Upload a brain MRI image to
-                elem_id="analysis"
+                value="Upload a brain MRI image to begin analysis with PerceptNet.",
+                elem_id="analysis-results"
             )
-
+
+    # Footer section
     gr.HTML("""
-    <div
-        <div
+    <div class="footer-section">
+        <div class="stat-grid">
             <div>
-                <h4 style="color: #
-                <
-
-
-
+                <h4 style="color: #2563eb; margin-bottom: 15px;">Technical Specifications</h4>
+                <div style="line-height: 1.6;">
+                    <p><strong>Model Architecture:</strong> Attention U-Net with Gating Mechanisms</p>
+                    <p><strong>Training Dataset:</strong> Brain Tumor Segmentation Dataset</p>
+                    <p><strong>Image Processing:</strong> 256×256 Grayscale Normalization</p>
+                    <p><strong>Inference Speed:</strong> Real-time Processing on GPU/CPU</p>
+                    <p><strong>Output Formats:</strong> Binary Masks, Probability Maps, Heatmaps</p>
+                </div>
             </div>
             <div>
-                <h4 style="color: #
-                <
-
-
-
-                </
+                <h4 style="color: #dc2626; margin-bottom: 15px;">Important Disclaimer</h4>
+                <div class="disclaimer-text">
+                    PerceptNet is an AI research tool designed for <strong>educational and research purposes only</strong>.
+                    This system is not intended for clinical diagnosis or medical decision-making.
+                    All results must be validated by qualified medical professionals before any medical application.
+                </div>
             </div>
         </div>
-        <hr style="margin:
-        <p style="text-align: center; color: #
-
+        <hr style="margin: 25px 0; border: none; border-top: 1px solid #e2e8f0;">
+        <p style="text-align: center; color: #64748b; margin: 15px 0; font-weight: 500;">
+            PerceptNet v1.0 • Advanced Medical Image Analysis • Research Grade Performance
         </p>
     </div>
     """)
@@ -425,9 +515,9 @@ with gr.Blocks(css=css, title="Your Attention U-Net Model", theme=gr.themes.Soft
     )
 
 if __name__ == "__main__":
-    print("Starting
-    print("
-    print("Auto-downloading
+    print("Starting PerceptNet Brain Tumor Segmentation System...")
+    print("Loading Attention U-Net architecture...")
+    print("Auto-downloading model weights...")
     print("Expected performance: Dice 0.8420, IoU 0.7297")
 
     app.launch(
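
For a quick check of the updated pipeline outside the Gradio UI, the new helpers can be driven directly. A minimal sketch, assuming app.py is importable from the working directory (importing it does not start the server, since `app.launch()` is guarded by `if __name__ == "__main__":`) and with `test_mri.png` as a placeholder for any grayscale-convertible MRI slice:

```python
# Local smoke test for the updated pipeline (sketch; not part of app.py).
from PIL import Image

import app  # exposes download_model, load_attention_model, preprocess_image, predict_tumor

image = Image.open("test_mri.png")  # placeholder input path

# predict_tumor() downloads and loads the checkpoint on first use, resizes the
# input to a 1x1x256x256 tensor, and returns (visualization, markdown report)
# or (None, error message) on failure.
result_image, analysis_text = app.predict_tumor(image)

if result_image is not None:
    result_image.save("perceptnet_result.png")  # the 5-panel matplotlib figure
print(analysis_text)
```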
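
The diff introduces three named buttons (`analyze_btn`, `random_btn`, `clear_btn`), but their event handlers sit in an unchanged region of app.py that this view does not show. A hypothetical wiring, consistent with the new component names and the handlers' return values (`clear_all()` now returns four values), would sit inside the `with gr.Blocks(...) as app:` block:

```python
# Hypothetical event wiring; the actual handlers are outside the shown hunks.
analyze_btn.click(
    fn=predict_tumor,
    inputs=[image_input, mask_state],
    outputs=[output_image, analysis_output],
)
random_btn.click(
    fn=load_random_sample,
    inputs=[],
    outputs=[image_input, mask_state, analysis_output],
)
clear_btn.click(
    fn=clear_all,
    inputs=[],
    outputs=[image_input, output_image, analysis_output, mask_state],
)
```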