Upload folder using huggingface_hub
Browse files
- README.md +141 -141
- space.py +2 -2
- src/README.md +141 -141
- src/backend/gradio_medical_image_analyzer/medical_image_analyzer.py +13 -7
- src/backend/gradio_medical_image_analyzer/medical_image_analyzer.pyi +13 -7
- src/demo/README.md +21 -2
- src/demo/space.py +2 -2
- src/pyproject.toml +18 -4
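
The commit title above is the default message that `huggingface_hub` writes when a whole folder is pushed in one commit. A minimal sketch of such an upload, assuming a local checkout of this project; the folder path is an assumption, and the Space id is taken from the badges in the diff below:

```python
# Sketch only: upload_folder() pushes a directory as a single commit,
# producing exactly the kind of commit shown on this page.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via the HF token in your environment
api.upload_folder(
    folder_path="./gradio_medical_image_analyzer",  # assumed local folder
    repo_id="AbdullahIsaMarkus/gradio_medical_image_analyzer",
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)
```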
README.md CHANGED
@@ -16,11 +16,10 @@ tags:
 - ai-agents
 ---
 
-
 # `gradio_medical_image_analyzer`
-<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0
+<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.1.0%20-%20orange"> <a href="https://github.com/thedatadudech/gradio-medical-image-analyzer/issues" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/Issues-white?logo=github&logoColor=black"></a> <a href="https://huggingface.co/spaces/AbdullahIsaMarkus/gradio_medical_image_analyzer/discussions" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/%F0%9F%A4%97%20Discuss-%23097EFF?style=flat&logoColor=black"></a>
 
-AI-agent optimized medical image analysis component for Gradio
+AI-agent optimized medical image analysis component for Gradio with DICOM support
 
 ## Installation
 
@@ -55,11 +54,11 @@ def draw_roi_on_image(image, roi_x, roi_y, roi_radius):
         image_rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
     else:
         image_rgb = image.copy()
-    
+
     # Draw ROI circle
     center = (int(roi_x), int(roi_y))
     radius = int(roi_radius)
-    
+
     # Draw outer circle (white)
     cv2.circle(image_rgb, center, radius, (255, 255, 255), 2)
     # Draw inner circle (red)
@@ -67,7 +66,7 @@ def draw_roi_on_image(image, roi_x, roi_y, roi_radius):
     # Draw center cross
     cv2.line(image_rgb, (center[0]-5, center[1]), (center[0]+5, center[1]), (255, 0, 0), 2)
     cv2.line(image_rgb, (center[0], center[1]-5), (center[0], center[1]+5), (255, 0, 0), 2)
-    
+
     return image_rgb
 
 def create_fat_overlay(base_image, segmentation_results):
@@ -77,33 +76,33 @@ def create_fat_overlay(base_image, segmentation_results):
         overlay_img = cv2.cvtColor(base_image, cv2.COLOR_GRAY2RGB)
     else:
         overlay_img = base_image.copy()
-    
+
     # Check if we have segmentation masks
     if not segmentation_results or 'segments' not in segmentation_results:
         return overlay_img
-    
+
     segments = segmentation_results.get('segments', {})
-    
+
     # Apply subcutaneous fat overlay (yellow)
     if 'subcutaneous' in segments and segments['subcutaneous'].get('mask') is not None:
         mask = segments['subcutaneous']['mask']
         yellow_overlay = np.zeros_like(overlay_img)
         yellow_overlay[mask > 0] = [255, 255, 0]  # Yellow
         overlay_img = cv2.addWeighted(overlay_img, 0.7, yellow_overlay, 0.3, 0)
-    
+
     # Apply visceral fat overlay (red)
     if 'visceral' in segments and segments['visceral'].get('mask') is not None:
         mask = segments['visceral']['mask']
         red_overlay = np.zeros_like(overlay_img)
         red_overlay[mask > 0] = [255, 0, 0]  # Red
         overlay_img = cv2.addWeighted(overlay_img, 0.7, red_overlay, 0.3, 0)
-    
+
     # Add legend
-    cv2.putText(overlay_img, "Yellow: Subcutaneous Fat", (10, 30), 
+    cv2.putText(overlay_img, "Yellow: Subcutaneous Fat", (10, 30),
                 cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
-    cv2.putText(overlay_img, "Red: Visceral Fat", (10, 60), 
+    cv2.putText(overlay_img, "Red: Visceral Fat", (10, 60),
                 cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
-    
+
     return overlay_img
 
 def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symptoms, show_overlay=False):
@@ -112,68 +111,68 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
     """
     if file_obj is None:
         return None, "No file selected", None, {}, None
-    
+
     # Create analyzer instance
     analyzer = MedicalImageAnalyzer(
         analysis_mode="structured",
         include_confidence=True,
         include_reasoning=True
     )
-    
+
     try:
         # Process the file (DICOM or image)
         file_path = file_obj.name if hasattr(file_obj, 'name') else str(file_obj)
         pixel_array, display_array, metadata = analyzer.process_file(file_path)
-        
+
         # Update modality from file metadata if it's a DICOM
         if metadata.get('file_type') == 'DICOM' and 'modality' in metadata:
             modality = metadata['modality']
-        
+
         # Prepare analysis parameters
         analysis_params = {
             "image": pixel_array,
             "modality": modality,
             "task": task
         }
-        
+
         # Add ROI if applicable
         if task in ["analyze_point", "full_analysis"]:
            # Scale ROI coordinates to image size
            h, w = pixel_array.shape
            roi_x_scaled = int(roi_x * w / 512)  # Assuming slider max is 512
            roi_y_scaled = int(roi_y * h / 512)
-            
+
            analysis_params["roi"] = {
                "x": roi_x_scaled,
                "y": roi_y_scaled,
                "radius": roi_radius
            }
-        
+
         # Add clinical context
         if symptoms:
             analysis_params["clinical_context"] = {"symptoms": symptoms}
-        
+
         # Perform analysis
         results = analyzer.analyze_image(**analysis_params)
-        
+
         # Create visual report
         visual_report = create_visual_report(results, metadata)
-        
+
         # Add metadata info
         info = f"📄 {metadata.get('file_type', 'Unknown')} | "
         info += f"🏥 {modality} | "
         info += f"📐 {metadata.get('shape', 'Unknown')}"
-        
+
         if metadata.get('window_center'):
             info += f" | Window C:{metadata['window_center']:.0f} W:{metadata['window_width']:.0f}"
-        
+
         # Create overlay image if requested
         overlay_image = None
         if show_overlay:
             # For ROI visualization
             if task in ["analyze_point", "full_analysis"] and roi_x and roi_y:
                 overlay_image = draw_roi_on_image(display_array.copy(), roi_x_scaled, roi_y_scaled, roi_radius)
-            
+
             # For fat segmentation overlay (simplified version since we don't have masks in current implementation)
             elif task == "segment_fat" and 'segmentation' in results and modality == 'CT':
                 # For now, just draw ROI since we don't have actual masks
@@ -183,15 +182,15 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
                 # Add text overlay about fat percentages
                 if 'statistics' in results['segmentation']:
                     stats = results['segmentation']['statistics']
-                    cv2.putText(overlay_image, f"Total Fat: {stats.get('total_fat_percentage', 0):.1f}%", 
+                    cv2.putText(overlay_image, f"Total Fat: {stats.get('total_fat_percentage', 0):.1f}%",
                                 (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
-                    cv2.putText(overlay_image, f"Subcutaneous: {stats.get('subcutaneous_fat_percentage', 0):.1f}%", 
+                    cv2.putText(overlay_image, f"Subcutaneous: {stats.get('subcutaneous_fat_percentage', 0):.1f}%",
                                 (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
-                    cv2.putText(overlay_image, f"Visceral: {stats.get('visceral_fat_percentage', 0):.1f}%", 
+                    cv2.putText(overlay_image, f"Visceral: {stats.get('visceral_fat_percentage', 0):.1f}%",
                                 (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
-        
+
         return display_array, info, visual_report, results, overlay_image
-        
+
     except Exception as e:
         error_msg = f"Error: {str(e)}"
         return None, error_msg, f"<div style='color: red;'>❌ {error_msg}</div>", {"error": error_msg}, None
@@ -199,31 +198,31 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
 def create_visual_report(results, metadata):
     """Creates a visual HTML report with improved styling"""
     html = f"""
-    <div class='medical-report' style='font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; 
-                padding: 24px; 
-                background: #ffffff; 
-                border-radius: 12px; 
-                max-width: 100%; 
+    <div class='medical-report' style='font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
+                padding: 24px;
+                background: #ffffff;
+                border-radius: 12px;
+                max-width: 100%;
                 box-shadow: 0 2px 8px rgba(0,0,0,0.1);
                 color: #1a1a1a !important;'>
-    
-        <h2 style='color: #1e40af !important; 
-                   border-bottom: 3px solid #3b82f6; 
-                   padding-bottom: 12px; 
+
+        <h2 style='color: #1e40af !important;
+                   border-bottom: 3px solid #3b82f6;
+                   padding-bottom: 12px;
                    margin-bottom: 20px;
                    font-size: 24px;
                    font-weight: 600;'>
             🏥 Medical Image Analysis Report
         </h2>
-    
-        <div style='background: #f0f9ff; 
-                    padding: 20px; 
-                    margin: 16px 0; 
-                    border-radius: 8px; 
+
+        <div style='background: #f0f9ff;
+                    padding: 20px;
+                    margin: 16px 0;
+                    border-radius: 8px;
                     box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
-            <h3 style='color: #1e3a8a !important; 
-                       font-size: 18px; 
-                       font-weight: 600; 
+            <h3 style='color: #1e3a8a !important;
+                       font-size: 18px;
+                       font-weight: 600;
                        margin-bottom: 12px;'>
             📋 Metadata
             </h3>
@@ -247,21 +246,21 @@ def create_visual_report(results, metadata):
     </table>
     </div>
     """
-    
+
     # Point Analysis
     if 'point_analysis' in results:
         pa = results['point_analysis']
         tissue = pa.get('tissue_type', {})
-        
+
         html += f"""
-        <div style='background: #f0f9ff; 
-                    padding: 20px; 
-                    margin: 16px 0; 
-                    border-radius: 8px; 
+        <div style='background: #f0f9ff;
+                    padding: 20px;
+                    margin: 16px 0;
+                    border-radius: 8px;
                     box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
-            <h3 style='color: #1e3a8a !important; 
-                       font-size: 18px; 
-                       font-weight: 600; 
+            <h3 style='color: #1e3a8a !important;
+                       font-size: 18px;
+                       font-weight: 600;
                        margin-bottom: 12px;'>
             🎯 Point Analysis
             </h3>
@@ -271,7 +270,7 @@ def create_visual_report(results, metadata):
             <td style='padding: 8px 0; color: #1f2937 !important;'>({pa.get('location', {}).get('x', 'N/A')}, {pa.get('location', {}).get('y', 'N/A')})</td>
         </tr>
         """
-        
+
         if results.get('modality') == 'CT':
             html += f"""
             <tr>
@@ -286,12 +285,12 @@ def create_visual_report(results, metadata):
                 <td style='padding: 8px 0; color: #1f2937 !important;'>{pa.get('intensity', 'N/A'):.3f}</td>
             </tr>
             """
-        
+
         html += f"""
         <tr>
             <td style='padding: 8px 0; color: #4b5563 !important;'><strong style='color: #374151 !important;'>Tissue Type:</strong></td>
             <td style='padding: 8px 0; color: #1f2937 !important;'>
-                <span style='font-size: 1.3em; vertical-align: middle;'>{tissue.get('icon', '')}</span> 
+                <span style='font-size: 1.3em; vertical-align: middle;'>{tissue.get('icon', '')}</span>
                 <span style='font-weight: 500; text-transform: capitalize;'>{tissue.get('type', 'Unknown').replace('_', ' ')}</span>
             </td>
         </tr>
@@ -301,38 +300,38 @@ def create_visual_report(results, metadata):
         </tr>
         </table>
         """
-        
+
         if 'reasoning' in pa:
             html += f"""
-            <div style='margin-top: 12px; 
-                        padding: 12px; 
-                        background: #dbeafe; 
-                        border-left: 3px solid #3b82f6; 
+            <div style='margin-top: 12px;
+                        padding: 12px;
+                        background: #dbeafe;
+                        border-left: 3px solid #3b82f6;
                         border-radius: 4px;'>
                 <p style='margin: 0; color: #1e40af !important; font-style: italic;'>
                 💭 {pa['reasoning']}
                 </p>
             </div>
             """
-        
+
         html += "</div>"
-    
+
     # Segmentation Results
     if 'segmentation' in results and results['segmentation']:
         seg = results['segmentation']
-        
+
         if 'statistics' in seg:
             # Fat segmentation for CT
             stats = seg['statistics']
             html += f"""
-            <div style='background: #f0f9ff; 
-                        padding: 20px; 
-                        margin: 16px 0; 
-                        border-radius: 8px; 
+            <div style='background: #f0f9ff;
+                        padding: 20px;
+                        margin: 16px 0;
+                        border-radius: 8px;
                         box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
-                <h3 style='color: #1e3a8a !important; 
-                           font-size: 18px; 
-                           font-weight: 600; 
+                <h3 style='color: #1e3a8a !important;
+                           font-size: 18px;
+                           font-weight: 600;
                            margin-bottom: 12px;'>
                 🔬 Fat Segmentation Analysis
                 </h3>
@@ -355,12 +354,12 @@ def create_visual_report(results, metadata):
             </div>
             </div>
             """
-            
+
             if 'interpretation' in seg:
                 interp = seg['interpretation']
                 obesity_color = "#16a34a" if interp.get("obesity_risk") == "normal" else "#d97706" if interp.get("obesity_risk") == "moderate" else "#dc2626"
                 visceral_color = "#16a34a" if interp.get("visceral_risk") == "normal" else "#d97706" if interp.get("visceral_risk") == "moderate" else "#dc2626"
-                
+
                 html += f"""
                 <div style='margin-top: 16px; padding: 16px; background: #f3f4f6; border-radius: 6px;'>
                     <h4 style='color: #374151 !important; font-size: 16px; font-weight: 600; margin-bottom: 8px;'>Risk Assessment</h4>
@@ -375,7 +374,7 @@ def create_visual_report(results, metadata):
                 </div>
                 </div>
                 """
-                
+
                 if interp.get('recommendations'):
                     html += """
                     <div style='margin-top: 12px; padding-top: 12px; border-top: 1px solid #e5e7eb;'>
@@ -385,10 +384,10 @@ def create_visual_report(results, metadata):
                     for rec in interp['recommendations']:
                         html += f"<li style='margin: 4px 0;'>{rec}</li>"
                     html += "</ul></div>"
-                
+
                 html += "</div>"
         html += "</div>"
-    
+
     # Quality Assessment
     if 'quality_metrics' in results:
         quality = results['quality_metrics']
@@ -400,38 +399,38 @@ def create_visual_report(results, metadata):
             'unknown': '#6b7280'
         }
         q_color = quality_colors.get(quality.get('overall_quality', 'unknown'), '#6b7280')
-        
+
         html += f"""
-        <div style='background: #f0f9ff; 
-                    padding: 20px; 
-                    margin: 16px 0; 
-                    border-radius: 8px; 
+        <div style='background: #f0f9ff;
+                    padding: 20px;
+                    margin: 16px 0;
+                    border-radius: 8px;
                     box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
-            <h3 style='color: #1e3a8a !important; 
-                       font-size: 18px; 
-                       font-weight: 600; 
+            <h3 style='color: #1e3a8a !important;
+                       font-size: 18px;
+                       font-weight: 600;
                        margin-bottom: 12px;'>
             📊 Image Quality Assessment
             </h3>
            <div style='display: flex; align-items: center; gap: 16px;'>
                <div>
                    <span style='color: #4b5563 !important; font-size: 14px;'>Overall Quality:</span>
-                    <span style='color: {q_color} !important; 
-                                 font-size: 18px; 
-                                 font-weight: 700; 
+                    <span style='color: {q_color} !important;
+                                 font-size: 18px;
+                                 font-weight: 700;
                                  margin-left: 8px;'>
                    {quality.get('overall_quality', 'unknown').upper()}
                    </span>
                </div>
            </div>
        """
-        
+
         if quality.get('issues'):
             html += f"""
-            <div style='margin-top: 12px; 
-                        padding: 12px; 
-                        background: #fef3c7; 
-                        border-left: 3px solid #f59e0b; 
+            <div style='margin-top: 12px;
+                        padding: 12px;
+                        background: #fef3c7;
+                        border-left: 3px solid #f59e0b;
                         border-radius: 4px;'>
                 <strong style='color: #92400e !important;'>Issues Detected:</strong>
                 <ul style='margin: 4px 0 0 0; padding-left: 20px; color: #92400e !important;'>
@@ -439,15 +438,15 @@ def create_visual_report(results, metadata):
             for issue in quality['issues']:
                 html += f"<li style='margin: 2px 0;'>{issue}</li>"
             html += "</ul></div>"
-        
+
         html += "</div>"
-    
+
     html += "</div>"
     return html
 
 def create_demo():
     with gr.Blocks(
-        title="Medical Image Analyzer - Enhanced Demo", 
+        title="Medical Image Analyzer - Enhanced Demo",
         theme=gr.themes.Soft(
             primary_hue="blue",
             secondary_hue="blue",
@@ -486,12 +485,12 @@ def create_demo():
             --bg-secondary: #1e293b;
             --bg-tertiary: #334155;
         }
-        
+
         /* Override default text colors for medical theme */
        * {
            color: var(--text-primary) !important;
        }
-        
+
        /* Style the file upload area */
        .file-upload {
            border: 2px dashed var(--medical-blue-light) !important;
@@ -502,13 +501,13 @@ def create_demo():
            transition: all 0.3s ease !important;
            color: var(--text-primary) !important;
        }
-        
+
        .file-upload:hover {
            border-color: var(--medical-blue) !important;
            background: var(--bg-tertiary) !important;
            box-shadow: 0 0 20px rgba(59, 130, 246, 0.2) !important;
        }
-        
+
        /* Ensure report text is readable with white background */
        .medical-report {
            background: #ffffff !important;
@@ -517,64 +516,64 @@ def create_demo():
            padding: 16px !important;
            color: #1a1a1a !important;
        }
-        
+
        .medical-report * {
            color: #1f2937 !important;  /* Dark gray text */
        }
-        
+
        .medical-report h2 {
            color: #1e40af !important;  /* Medical blue for main heading */
        }
-        
+
        .medical-report h3, .medical-report h4 {
            color: #1e3a8a !important;  /* Darker medical blue for subheadings */
        }
-        
+
        .medical-report strong {
            color: #374151 !important;  /* Darker gray for labels */
        }
-        
+
        .medical-report td {
            color: #1f2937 !important;  /* Ensure table text is dark */
        }
-        
+
        /* Report sections with light blue background */
        .medical-report > div {
            background: #f0f9ff !important;
            color: #1f2937 !important;
        }
-        
+
        /* Medical blue accents for UI elements */
        .gr-button-primary {
            background: var(--medical-blue) !important;
            border-color: var(--medical-blue) !important;
        }
-        
+
        .gr-button-primary:hover {
            background: var(--medical-blue-dark) !important;
            border-color: var(--medical-blue-dark) !important;
        }
-        
+
        /* Tab styling */
        .gr-tab-item {
            border-color: var(--medical-blue-light) !important;
        }
-        
+
        .gr-tab-item.selected {
            background: var(--medical-blue) !important;
            color: white !important;
        }
-        
+
        /* Accordion styling */
        .gr-accordion {
            border-color: var(--medical-blue-light) !important;
        }
-        
+
        /* Slider track in medical blue */
        input[type="range"]::-webkit-slider-track {
            background: var(--bg-tertiary) !important;
        }
-        
+
        input[type="range"]::-webkit-slider-thumb {
            background: var(--medical-blue) !important;
        }
@@ -582,10 +581,10 @@ def create_demo():
     ) as demo:
         gr.Markdown("""
         # 🏥 Medical Image Analyzer
-        
+
         Supports **DICOM** (.dcm) and all image formats with automatic modality detection!
         """)
-        
+
         with gr.Row():
             with gr.Column(scale=1):
                 # File upload - no file type restrictions
@@ -600,11 +599,11 @@ def create_demo():
                 )
                 gr.Markdown("""
                 <small style='color: #666;'>
-                Accepts: DICOM (.dcm, .dicom), Images (.png, .jpg, .jpeg, .tiff, .bmp), 
+                Accepts: DICOM (.dcm, .dicom), Images (.png, .jpg, .jpeg, .tiff, .bmp),
                 and files without extensions (e.g., IM_0001, IM_0002, etc.)
                 </small>
                 """)
-                
+
                 # Modality selection
                 modality = gr.Radio(
                     choices=["CT", "CR", "DX", "RX", "DR"],
@@ -612,7 +611,7 @@ def create_demo():
                     label="Modality",
                     info="Will be auto-detected for DICOM files"
                 )
-                
+
                 # Task selection
                 task = gr.Dropdown(
                     choices=[
@@ -623,13 +622,13 @@ def create_demo():
                     value="full_analysis",
                     label="Analysis Task"
                 )
-                
+
                 # ROI settings
                 with gr.Accordion("🎯 Region of Interest (ROI)", open=True):
                     roi_x = gr.Slider(0, 512, 256, label="X Position", step=1)
                     roi_y = gr.Slider(0, 512, 256, label="Y Position", step=1)
                     roi_radius = gr.Slider(5, 50, 10, label="Radius", step=1)
-                
+
                 # Clinical context
                 with gr.Accordion("🏥 Clinical Context", open=False):
                     symptoms = gr.CheckboxGroup(
@@ -639,7 +638,7 @@ def create_demo():
                     ],
                     label="Symptoms/Indication"
                 )
-                
+
                 # Visualization options
                 with gr.Accordion("🎨 Visualization Options", open=True):
                     show_overlay = gr.Checkbox(
@@ -647,25 +646,25 @@ def create_demo():
                         value=True,
                         info="Display ROI circle or fat segmentation info on the image"
                     )
-                
+
                 analyze_btn = gr.Button("🔬 Analyze", variant="primary", size="lg")
-            
+
             with gr.Column(scale=2):
                 # Results with tabs for different views
                 with gr.Tab("🖼️ Original Image"):
                     image_display = gr.Image(label="Medical Image", type="numpy")
-                
+
                 with gr.Tab("🎯 Overlay View"):
                     overlay_display = gr.Image(label="Image with Overlay", type="numpy")
-                
+
                 file_info = gr.Textbox(label="File Information", lines=1)
-                
+
                 with gr.Tab("📊 Visual Report"):
                     report_html = gr.HTML()
-                
+
                 with gr.Tab("🔧 JSON Output"):
                     json_output = gr.JSON(label="Structured Data for AI Agents")
-        
+
         # Examples and help
         with gr.Row():
             gr.Markdown("""
@@ -674,51 +673,51 @@ def create_demo():
             - **PNG/JPG**: Interpreted based on selected modality
             - **All Formats**: Automatic grayscale conversion
            - **Files without extension**: Supported (e.g., IM_0001) - will try DICOM first
-            
+
            ### 🎯 Usage
            1. Upload a medical image file
            2. Select modality (auto-detected for DICOM)
            3. Choose analysis task
            4. Adjust ROI position for point analysis
            5. Click "Analyze"
-            
+
            ### 💡 Features
            - **ROI Visualization**: See the exact area being analyzed
            - **Fat Segmentation**: Visual percentages for CT scans
            - **Multi-format Support**: Works with any medical image format
            - **AI Agent Ready**: Structured JSON output for integration
            """)
-        
+
        # Connect the interface
        analyze_btn.click(
            fn=process_and_analyze,
            inputs=[file_input, modality, task, roi_x, roi_y, roi_radius, symptoms, show_overlay],
            outputs=[image_display, file_info, report_html, json_output, overlay_display]
        )
-        
+
        # Auto-update ROI limits when image is loaded
        def update_roi_on_upload(file_obj):
            if file_obj is None:
                return gr.update(), gr.update()
-            
+
            try:
                analyzer = MedicalImageAnalyzer()
                _, _, metadata = analyzer.process_file(file_obj.name if hasattr(file_obj, 'name') else str(file_obj))
-                
+
                if 'shape' in metadata:
                    h, w = metadata['shape']
                    return gr.update(maximum=w-1, value=w//2), gr.update(maximum=h-1, value=h//2)
            except:
                pass
-            
+
            return gr.update(), gr.update()
-        
+
        file_input.change(
            fn=update_roi_on_upload,
            inputs=[file_input],
            outputs=[roi_x, roi_y]
        )
-        
+
        return demo
 
 if __name__ == "__main__":
@@ -995,3 +994,4 @@ The code snippet below is accurate in cases where the component is used as both
 ) -> typing.Dict[str, typing.Any][str, typing.Any]:
     return value
 ```
+
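The demo code in this diff maps slider coordinates, which live in a fixed 0-512 range, onto the actual pixel grid before building the ROI. A self-contained sketch of that arithmetic; the helper name and the example image size are illustrative, not from the repo:

```python
# Standalone sketch of the ROI scaling used in process_and_analyze() above.
def scale_roi(roi_x, roi_y, shape, slider_max=512):
    """Map slider coordinates (0..slider_max) onto an image of the given shape."""
    h, w = shape
    return int(roi_x * w / slider_max), int(roi_y * h / slider_max)

# For a 1024x768 image, the slider midpoint (256, 256) lands at pixel (512, 384):
print(scale_roi(256, 256, (768, 1024)))  # -> (512, 384)
```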
space.py CHANGED
@@ -21,10 +21,10 @@ with gr.Blocks(
 # `gradio_medical_image_analyzer`
 
 <div style="display: flex; gap: 7px;">
-<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0
+<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.1.0%20-%20orange"> <a href="https://github.com/thedatadudech/gradio-medical-image-analyzer/issues" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/Issues-white?logo=github&logoColor=black"></a> <a href="https://huggingface.co/spaces/AbdullahIsaMarkus/gradio_medical_image_analyzer/discussions" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/%F0%9F%A4%97%20Discuss-%23097EFF?style=flat&logoColor=black"></a>
 </div>
 
-AI-agent optimized medical image analysis component for Gradio
+AI-agent optimized medical image analysis component for Gradio with DICOM support
 """, elem_classes=["md-custom"], header_links=True)
     app.render()
     gr.Markdown(
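Taken together, the demo changes above exercise a small Python API on the component. A condensed usage sketch follows; the import path and return shapes are inferred from the demo code in the diffs, not confirmed against the package docs:

```python
# Sketch based on the demo code above; treat the details as assumptions.
from gradio_medical_image_analyzer import MedicalImageAnalyzer

analyzer = MedicalImageAnalyzer(
    analysis_mode="structured",
    include_confidence=True,
    include_reasoning=True,
)

# process_file() accepts DICOM or plain image files and, per the demo code,
# returns the raw pixel array, a display-ready array, and file metadata.
pixel_array, display_array, metadata = analyzer.process_file("IM_0001")

results = analyzer.analyze_image(
    image=pixel_array,
    modality=metadata.get("modality", "CT"),
    task="full_analysis",
    roi={"x": 256, "y": 256, "radius": 10},
)
print(results.get("point_analysis", {}).get("tissue_type"))
```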
src/README.md
CHANGED
|
@@ -16,11 +16,10 @@ tags:
|
|
| 16 |
- ai-agents
|
| 17 |
---
|
| 18 |
|
| 19 |
-
|
| 20 |
# `gradio_medical_image_analyzer`
|
| 21 |
-
<img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0
|
| 22 |
|
| 23 |
-
AI-agent optimized medical image analysis component for Gradio
|
| 24 |
|
| 25 |
## Installation
|
| 26 |
|
|
@@ -55,11 +54,11 @@ def draw_roi_on_image(image, roi_x, roi_y, roi_radius):
|
|
| 55 |
image_rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
|
| 56 |
else:
|
| 57 |
image_rgb = image.copy()
|
| 58 |
-
|
| 59 |
# Draw ROI circle
|
| 60 |
center = (int(roi_x), int(roi_y))
|
| 61 |
radius = int(roi_radius)
|
| 62 |
-
|
| 63 |
# Draw outer circle (white)
|
| 64 |
cv2.circle(image_rgb, center, radius, (255, 255, 255), 2)
|
| 65 |
# Draw inner circle (red)
|
|
@@ -67,7 +66,7 @@ def draw_roi_on_image(image, roi_x, roi_y, roi_radius):
|
|
| 67 |
# Draw center cross
|
| 68 |
cv2.line(image_rgb, (center[0]-5, center[1]), (center[0]+5, center[1]), (255, 0, 0), 2)
|
| 69 |
cv2.line(image_rgb, (center[0], center[1]-5), (center[0], center[1]+5), (255, 0, 0), 2)
|
| 70 |
-
|
| 71 |
return image_rgb
|
| 72 |
|
| 73 |
def create_fat_overlay(base_image, segmentation_results):
|
|
@@ -77,33 +76,33 @@ def create_fat_overlay(base_image, segmentation_results):
|
|
| 77 |
overlay_img = cv2.cvtColor(base_image, cv2.COLOR_GRAY2RGB)
|
| 78 |
else:
|
| 79 |
overlay_img = base_image.copy()
|
| 80 |
-
|
| 81 |
# Check if we have segmentation masks
|
| 82 |
if not segmentation_results or 'segments' not in segmentation_results:
|
| 83 |
return overlay_img
|
| 84 |
-
|
| 85 |
segments = segmentation_results.get('segments', {})
|
| 86 |
-
|
| 87 |
# Apply subcutaneous fat overlay (yellow)
|
| 88 |
if 'subcutaneous' in segments and segments['subcutaneous'].get('mask') is not None:
|
| 89 |
mask = segments['subcutaneous']['mask']
|
| 90 |
yellow_overlay = np.zeros_like(overlay_img)
|
| 91 |
yellow_overlay[mask > 0] = [255, 255, 0] # Yellow
|
| 92 |
overlay_img = cv2.addWeighted(overlay_img, 0.7, yellow_overlay, 0.3, 0)
|
| 93 |
-
|
| 94 |
# Apply visceral fat overlay (red)
|
| 95 |
if 'visceral' in segments and segments['visceral'].get('mask') is not None:
|
| 96 |
mask = segments['visceral']['mask']
|
| 97 |
red_overlay = np.zeros_like(overlay_img)
|
| 98 |
red_overlay[mask > 0] = [255, 0, 0] # Red
|
| 99 |
overlay_img = cv2.addWeighted(overlay_img, 0.7, red_overlay, 0.3, 0)
|
| 100 |
-
|
| 101 |
# Add legend
|
| 102 |
-
cv2.putText(overlay_img, "Yellow: Subcutaneous Fat", (10, 30),
|
| 103 |
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
|
| 104 |
-
cv2.putText(overlay_img, "Red: Visceral Fat", (10, 60),
|
| 105 |
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
|
| 106 |
-
|
| 107 |
return overlay_img
|
| 108 |
|
| 109 |
def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symptoms, show_overlay=False):
|
|
@@ -112,68 +111,68 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
|
|
| 112 |
"""
|
| 113 |
if file_obj is None:
|
| 114 |
return None, "No file selected", None, {}, None
|
| 115 |
-
|
| 116 |
# Create analyzer instance
|
| 117 |
analyzer = MedicalImageAnalyzer(
|
| 118 |
analysis_mode="structured",
|
| 119 |
include_confidence=True,
|
| 120 |
include_reasoning=True
|
| 121 |
)
|
| 122 |
-
|
| 123 |
try:
|
| 124 |
# Process the file (DICOM or image)
|
| 125 |
file_path = file_obj.name if hasattr(file_obj, 'name') else str(file_obj)
|
| 126 |
pixel_array, display_array, metadata = analyzer.process_file(file_path)
|
| 127 |
-
|
| 128 |
# Update modality from file metadata if it's a DICOM
|
| 129 |
if metadata.get('file_type') == 'DICOM' and 'modality' in metadata:
|
| 130 |
modality = metadata['modality']
|
| 131 |
-
|
| 132 |
# Prepare analysis parameters
|
| 133 |
analysis_params = {
|
| 134 |
"image": pixel_array,
|
| 135 |
"modality": modality,
|
| 136 |
"task": task
|
| 137 |
}
|
| 138 |
-
|
| 139 |
# Add ROI if applicable
|
| 140 |
if task in ["analyze_point", "full_analysis"]:
|
| 141 |
# Scale ROI coordinates to image size
|
| 142 |
h, w = pixel_array.shape
|
| 143 |
roi_x_scaled = int(roi_x * w / 512) # Assuming slider max is 512
|
| 144 |
roi_y_scaled = int(roi_y * h / 512)
|
| 145 |
-
|
| 146 |
analysis_params["roi"] = {
|
| 147 |
"x": roi_x_scaled,
|
| 148 |
"y": roi_y_scaled,
|
| 149 |
"radius": roi_radius
|
| 150 |
}
|
| 151 |
-
|
| 152 |
# Add clinical context
|
| 153 |
if symptoms:
|
| 154 |
analysis_params["clinical_context"] = {"symptoms": symptoms}
|
| 155 |
-
|
| 156 |
# Perform analysis
|
| 157 |
results = analyzer.analyze_image(**analysis_params)
|
| 158 |
-
|
| 159 |
# Create visual report
|
| 160 |
visual_report = create_visual_report(results, metadata)
|
| 161 |
-
|
| 162 |
# Add metadata info
|
| 163 |
info = f"📄 {metadata.get('file_type', 'Unknown')} | "
|
| 164 |
info += f"🏥 {modality} | "
|
| 165 |
info += f"📐 {metadata.get('shape', 'Unknown')}"
|
| 166 |
-
|
| 167 |
if metadata.get('window_center'):
|
| 168 |
info += f" | Window C:{metadata['window_center']:.0f} W:{metadata['window_width']:.0f}"
|
| 169 |
-
|
| 170 |
# Create overlay image if requested
|
| 171 |
overlay_image = None
|
| 172 |
if show_overlay:
|
| 173 |
# For ROI visualization
|
| 174 |
if task in ["analyze_point", "full_analysis"] and roi_x and roi_y:
|
| 175 |
overlay_image = draw_roi_on_image(display_array.copy(), roi_x_scaled, roi_y_scaled, roi_radius)
|
| 176 |
-
|
| 177 |
# For fat segmentation overlay (simplified version since we don't have masks in current implementation)
|
| 178 |
elif task == "segment_fat" and 'segmentation' in results and modality == 'CT':
|
| 179 |
# For now, just draw ROI since we don't have actual masks
|
|
@@ -183,15 +182,15 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
|
|
| 183 |
# Add text overlay about fat percentages
|
| 184 |
if 'statistics' in results['segmentation']:
|
| 185 |
stats = results['segmentation']['statistics']
|
| 186 |
-
cv2.putText(overlay_image, f"Total Fat: {stats.get('total_fat_percentage', 0):.1f}%",
|
| 187 |
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
|
| 188 |
-
cv2.putText(overlay_image, f"Subcutaneous: {stats.get('subcutaneous_fat_percentage', 0):.1f}%",
|
| 189 |
(10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)
|
| 190 |
-
cv2.putText(overlay_image, f"Visceral: {stats.get('visceral_fat_percentage', 0):.1f}%",
|
| 191 |
(10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
|
| 192 |
-
|
| 193 |
return display_array, info, visual_report, results, overlay_image
|
| 194 |
-
|
| 195 |
except Exception as e:
|
| 196 |
error_msg = f"Error: {str(e)}"
|
| 197 |
return None, error_msg, f"<div style='color: red;'>❌ {error_msg}</div>", {"error": error_msg}, None
|
|
@@ -199,31 +198,31 @@ def process_and_analyze(file_obj, modality, task, roi_x, roi_y, roi_radius, symp
|
|
| 199 |
def create_visual_report(results, metadata):
|
| 200 |
"""Creates a visual HTML report with improved styling"""
|
| 201 |
html = f"""
|
| 202 |
-
<div class='medical-report' style='font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
| 203 |
-
padding: 24px;
|
| 204 |
-
background: #ffffff;
|
| 205 |
-
border-radius: 12px;
|
| 206 |
-
max-width: 100%;
|
| 207 |
box-shadow: 0 2px 8px rgba(0,0,0,0.1);
|
| 208 |
color: #1a1a1a !important;'>
|
| 209 |
-
|
| 210 |
-
<h2 style='color: #1e40af !important;
|
| 211 |
-
                      border-bottom: 3px solid #3b82f6;
                      padding-bottom: 12px;
                      margin-bottom: 20px;
                      font-size: 24px;
                      font-weight: 600;'>
            🏥 Medical Image Analysis Report
        </h2>

        <div style='background: #f0f9ff;
                    padding: 20px;
                    margin: 16px 0;
                    border-radius: 8px;
                    box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
            <h3 style='color: #1e3a8a !important;
                       font-size: 18px;
                       font-weight: 600;
                       margin-bottom: 12px;'>
                📋 Metadata
            </h3>

@@ -247,21 +246,21 @@ def create_visual_report(results, metadata):

            </table>
        </div>
    """

    # Point Analysis
    if 'point_analysis' in results:
        pa = results['point_analysis']
        tissue = pa.get('tissue_type', {})

        html += f"""
        <div style='background: #f0f9ff;
                    padding: 20px;
                    margin: 16px 0;
                    border-radius: 8px;
                    box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
            <h3 style='color: #1e3a8a !important;
                       font-size: 18px;
                       font-weight: 600;
                       margin-bottom: 12px;'>
                🎯 Point Analysis
            </h3>

@@ -271,7 +270,7 @@ def create_visual_report(results, metadata):

                    <td style='padding: 8px 0; color: #1f2937 !important;'>({pa.get('location', {}).get('x', 'N/A')}, {pa.get('location', {}).get('y', 'N/A')})</td>
                </tr>
        """

        if results.get('modality') == 'CT':
            html += f"""
                <tr>

@@ -286,12 +285,12 @@ def create_visual_report(results, metadata):

                    <td style='padding: 8px 0; color: #1f2937 !important;'>{pa.get('intensity', 0.0):.3f}</td>
                </tr>
            """

        html += f"""
                <tr>
                    <td style='padding: 8px 0; color: #4b5563 !important;'><strong style='color: #374151 !important;'>Tissue Type:</strong></td>
                    <td style='padding: 8px 0; color: #1f2937 !important;'>
                        <span style='font-size: 1.3em; vertical-align: middle;'>{tissue.get('icon', '')}</span>
                        <span style='font-weight: 500; text-transform: capitalize;'>{tissue.get('type', 'Unknown').replace('_', ' ')}</span>
                    </td>
                </tr>

@@ -301,38 +300,38 @@ def create_visual_report(results, metadata):

                </tr>
            </table>
        """

        if 'reasoning' in pa:
            html += f"""
            <div style='margin-top: 12px;
                        padding: 12px;
                        background: #dbeafe;
                        border-left: 3px solid #3b82f6;
                        border-radius: 4px;'>
                <p style='margin: 0; color: #1e40af !important; font-style: italic;'>
                    💭 {pa['reasoning']}
                </p>
            </div>
            """

        html += "</div>"

    # Segmentation Results
    if 'segmentation' in results and results['segmentation']:
        seg = results['segmentation']

        if 'statistics' in seg:
            # Fat segmentation for CT
            stats = seg['statistics']
            html += f"""
            <div style='background: #f0f9ff;
                        padding: 20px;
                        margin: 16px 0;
                        border-radius: 8px;
                        box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
                <h3 style='color: #1e3a8a !important;
                           font-size: 18px;
                           font-weight: 600;
                           margin-bottom: 12px;'>
                    🔬 Fat Segmentation Analysis
                </h3>

@@ -355,12 +354,12 @@ def create_visual_report(results, metadata):

                </div>
            </div>
            """

            if 'interpretation' in seg:
                interp = seg['interpretation']
                obesity_color = "#16a34a" if interp.get("obesity_risk") == "normal" else "#d97706" if interp.get("obesity_risk") == "moderate" else "#dc2626"
                visceral_color = "#16a34a" if interp.get("visceral_risk") == "normal" else "#d97706" if interp.get("visceral_risk") == "moderate" else "#dc2626"

                html += f"""
                <div style='margin-top: 16px; padding: 16px; background: #f3f4f6; border-radius: 6px;'>
                    <h4 style='color: #374151 !important; font-size: 16px; font-weight: 600; margin-bottom: 8px;'>Risk Assessment</h4>

@@ -375,7 +374,7 @@ def create_visual_report(results, metadata):

                    </div>
                </div>
                """

                if interp.get('recommendations'):
                    html += """
                    <div style='margin-top: 12px; padding-top: 12px; border-top: 1px solid #e5e7eb;'>

@@ -385,10 +384,10 @@ def create_visual_report(results, metadata):

                    for rec in interp['recommendations']:
                        html += f"<li style='margin: 4px 0;'>{rec}</li>"
                    html += "</ul></div>"

            html += "</div>"
        html += "</div>"

    # Quality Assessment
    if 'quality_metrics' in results:
        quality = results['quality_metrics']

@@ -400,38 +399,38 @@ def create_visual_report(results, metadata):

            'unknown': '#6b7280'
        }
        q_color = quality_colors.get(quality.get('overall_quality', 'unknown'), '#6b7280')

        html += f"""
        <div style='background: #f0f9ff;
                    padding: 20px;
                    margin: 16px 0;
                    border-radius: 8px;
                    box-shadow: 0 1px 3px rgba(0,0,0,0.1);'>
            <h3 style='color: #1e3a8a !important;
                       font-size: 18px;
                       font-weight: 600;
                       margin-bottom: 12px;'>
                📊 Image Quality Assessment
            </h3>
            <div style='display: flex; align-items: center; gap: 16px;'>
                <div>
                    <span style='color: #4b5563 !important; font-size: 14px;'>Overall Quality:</span>
                    <span style='color: {q_color} !important;
                                 font-size: 18px;
                                 font-weight: 700;
                                 margin-left: 8px;'>
                        {quality.get('overall_quality', 'unknown').upper()}
                    </span>
                </div>
            </div>
        """

        if quality.get('issues'):
            html += f"""
            <div style='margin-top: 12px;
                        padding: 12px;
                        background: #fef3c7;
                        border-left: 3px solid #f59e0b;
                        border-radius: 4px;'>
                <strong style='color: #92400e !important;'>Issues Detected:</strong>
                <ul style='margin: 4px 0 0 0; padding-left: 20px; color: #92400e !important;'>

@@ -439,15 +438,15 @@ def create_visual_report(results, metadata):

            for issue in quality['issues']:
                html += f"<li style='margin: 2px 0;'>{issue}</li>"
            html += "</ul></div>"

        html += "</div>"

    html += "</div>"
    return html
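
# Example of the result fields create_visual_report() reads (hypothetical
# values, mirroring the keys used above):
#   results = {
#       "modality": "CT",
#       "point_analysis": {
#           "location": {"x": 256, "y": 256},
#           "tissue_type": {"type": "soft_tissue", "icon": "🔵"},
#           "reasoning": "Homogeneous region near the ROI center",
#       },
#       "quality_metrics": {"overall_quality": "good", "issues": []},
#   }
#   html = create_visual_report(results, {"file_type": "DICOM"})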

def create_demo():
    with gr.Blocks(
        title="Medical Image Analyzer - Enhanced Demo",
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="blue",

@@ -486,12 +485,12 @@ def create_demo():

            --bg-secondary: #1e293b;
            --bg-tertiary: #334155;
        }

        /* Override default text colors for medical theme */
        * {
            color: var(--text-primary) !important;
        }

        /* Style the file upload area */
        .file-upload {
            border: 2px dashed var(--medical-blue-light) !important;

@@ -502,13 +501,13 @@ def create_demo():

            transition: all 0.3s ease !important;
            color: var(--text-primary) !important;
        }

        .file-upload:hover {
            border-color: var(--medical-blue) !important;
            background: var(--bg-tertiary) !important;
            box-shadow: 0 0 20px rgba(59, 130, 246, 0.2) !important;
        }

        /* Ensure report text is readable with white background */
        .medical-report {
            background: #ffffff !important;

@@ -517,64 +516,64 @@ def create_demo():

            padding: 16px !important;
            color: #1a1a1a !important;
        }

        .medical-report * {
            color: #1f2937 !important;  /* Dark gray text */
        }

        .medical-report h2 {
            color: #1e40af !important;  /* Medical blue for main heading */
        }

        .medical-report h3, .medical-report h4 {
            color: #1e3a8a !important;  /* Darker medical blue for subheadings */
        }

        .medical-report strong {
            color: #374151 !important;  /* Darker gray for labels */
        }

        .medical-report td {
            color: #1f2937 !important;  /* Ensure table text is dark */
        }

        /* Report sections with light blue background */
        .medical-report > div {
            background: #f0f9ff !important;
            color: #1f2937 !important;
        }

        /* Medical blue accents for UI elements */
        .gr-button-primary {
            background: var(--medical-blue) !important;
            border-color: var(--medical-blue) !important;
        }

        .gr-button-primary:hover {
            background: var(--medical-blue-dark) !important;
            border-color: var(--medical-blue-dark) !important;
        }

        /* Tab styling */
        .gr-tab-item {
            border-color: var(--medical-blue-light) !important;
        }

        .gr-tab-item.selected {
            background: var(--medical-blue) !important;
            color: white !important;
        }

        /* Accordion styling */
        .gr-accordion {
            border-color: var(--medical-blue-light) !important;
        }

        /* Slider track in medical blue */
        input[type="range"]::-webkit-slider-track {
            background: var(--bg-tertiary) !important;
        }

        input[type="range"]::-webkit-slider-thumb {
            background: var(--medical-blue) !important;
        }

@@ -582,10 +581,10 @@ def create_demo():

    ) as demo:
        gr.Markdown("""
        # 🏥 Medical Image Analyzer

        Supports **DICOM** (.dcm) and all image formats with automatic modality detection!
        """)

        with gr.Row():
            with gr.Column(scale=1):
                # File upload - no file type restrictions

@@ -600,11 +599,11 @@ def create_demo():

                )
                gr.Markdown("""
                <small style='color: #666;'>
                Accepts: DICOM (.dcm, .dicom), Images (.png, .jpg, .jpeg, .tiff, .bmp),
                and files without extensions (e.g., IM_0001, IM_0002, etc.)
                </small>
                """)

                # Modality selection
                modality = gr.Radio(
                    choices=["CT", "CR", "DX", "RX", "DR"],

@@ -612,7 +611,7 @@ def create_demo():

                    label="Modality",
                    info="Will be auto-detected for DICOM files"
                )

                # Task selection
                task = gr.Dropdown(
                    choices=[

@@ -623,13 +622,13 @@ def create_demo():

                    value="full_analysis",
                    label="Analysis Task"
                )

                # ROI settings
                with gr.Accordion("🎯 Region of Interest (ROI)", open=True):
                    roi_x = gr.Slider(0, 512, 256, label="X Position", step=1)
                    roi_y = gr.Slider(0, 512, 256, label="Y Position", step=1)
                    roi_radius = gr.Slider(5, 50, 10, label="Radius", step=1)

                # Clinical context
                with gr.Accordion("🏥 Clinical Context", open=False):
                    symptoms = gr.CheckboxGroup(

@@ -639,7 +638,7 @@ def create_demo():

                    ],
                    label="Symptoms/Indication"
                )

                # Visualization options
                with gr.Accordion("🎨 Visualization Options", open=True):
                    show_overlay = gr.Checkbox(

@@ -647,25 +646,25 @@ def create_demo():

                        value=True,
                        info="Display ROI circle or fat segmentation info on the image"
                    )

                analyze_btn = gr.Button("🔬 Analyze", variant="primary", size="lg")

            with gr.Column(scale=2):
                # Results with tabs for different views
                with gr.Tab("🖼️ Original Image"):
                    image_display = gr.Image(label="Medical Image", type="numpy")

                with gr.Tab("🎯 Overlay View"):
                    overlay_display = gr.Image(label="Image with Overlay", type="numpy")

                file_info = gr.Textbox(label="File Information", lines=1)

                with gr.Tab("📊 Visual Report"):
                    report_html = gr.HTML()

                with gr.Tab("🔧 JSON Output"):
                    json_output = gr.JSON(label="Structured Data for AI Agents")

        # Examples and help
        with gr.Row():
            gr.Markdown("""

@@ -674,51 +673,51 @@ def create_demo():

            - **PNG/JPG**: Interpreted based on selected modality
            - **All Formats**: Automatic grayscale conversion
            - **Files without extension**: Supported (e.g., IM_0001) - will try DICOM first

            ### 🎯 Usage
            1. Upload a medical image file
            2. Select modality (auto-detected for DICOM)
            3. Choose analysis task
            4. Adjust ROI position for point analysis
            5. Click "Analyze"

            ### 💡 Features
            - **ROI Visualization**: See the exact area being analyzed
            - **Fat Segmentation**: Visual percentages for CT scans
            - **Multi-format Support**: Works with any medical image format
            - **AI Agent Ready**: Structured JSON output for integration
            """)

        # Connect the interface
        analyze_btn.click(
            fn=process_and_analyze,
            inputs=[file_input, modality, task, roi_x, roi_y, roi_radius, symptoms, show_overlay],
            outputs=[image_display, file_info, report_html, json_output, overlay_display]
        )

        # Auto-update ROI limits when image is loaded
        def update_roi_on_upload(file_obj):
            if file_obj is None:
                return gr.update(), gr.update()

            try:
                analyzer = MedicalImageAnalyzer()
                _, _, metadata = analyzer.process_file(file_obj.name if hasattr(file_obj, 'name') else str(file_obj))

                if 'shape' in metadata:
                    h, w = metadata['shape']
                    return gr.update(maximum=w-1, value=w//2), gr.update(maximum=h-1, value=h//2)
            except Exception:
                pass

            return gr.update(), gr.update()

        file_input.change(
            fn=update_roi_on_upload,
            inputs=[file_input],
            outputs=[roi_x, roi_y]
        )

    return demo

if __name__ == "__main__":

@@ -995,3 +994,4 @@ The code snippet below is accurate in cases where the component is used as both

) -> typing.Dict[str, typing.Any]:
    return value
```
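The demo's JSON Output tab maps one-to-one onto the dict returned by `analyze_image`, which is what an AI agent would consume. A minimal consumer sketch (assuming the package is installed; the file name `IM_0001` is a placeholder, and the keys are the same ones `create_visual_report()` reads above):

```python
from gradio_medical_image_analyzer import MedicalImageAnalyzer

analyzer = MedicalImageAnalyzer(
    analysis_mode="structured",
    include_confidence=True,
    include_reasoning=True,
)

# process_file() accepts DICOM or plain image paths, as in the demo above
pixel_array, _, metadata = analyzer.process_file("IM_0001")

results = analyzer.analyze_image(
    image=pixel_array,
    modality=metadata.get("modality", "CT"),
    task="full_analysis",
)

# The same keys the visual report renders are available programmatically
quality = results.get("quality_metrics", {})
if quality.get("overall_quality") == "poor":
    print("Re-acquisition suggested:", quality.get("issues", []))
elif "point_analysis" in results:
    tissue = results["point_analysis"].get("tissue_type", {})
    print("Tissue:", tissue.get("type", "unknown"))
```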
src/backend/gradio_medical_image_analyzer/medical_image_analyzer.py
CHANGED

@@ -494,16 +494,22 @@ class MedicalImageAnalyzer(Component):

        # How close is center value to mean
        center_deviation = abs(center_value - mean) / std

        # Coefficient of variation (normalized)
        cv = std / (abs(mean) + 1e-6)

        # Base confidence from homogeneity (sigmoid-like transformation);
        # combined with a centered ROI this yields ~0.88 at CV 0.1, ~0.65 at CV 0.5, ~0.53 at CV 1.0
        base_confidence = 1.0 / (1.0 + cv * 2.0)

        # Adjust based on center deviation
        # If center is close to mean, increase confidence
        deviation_factor = 1.0 / (1.0 + center_deviation * 0.5)

        # Combine factors
        confidence = base_confidence * 0.7 + deviation_factor * 0.3

        # Ensure reasonable minimum confidence for valid detections
        confidence = max(0.5, min(0.99, confidence))

        return round(confidence, 2)
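The confidence heuristic above is easy to sanity-check in isolation. The following standalone sketch re-implements just this arithmetic for illustration (it is not part of the component's public API):

```python
def roi_confidence(cv: float, center_deviation: float) -> float:
    """Re-implementation of the confidence mapping above, for illustration."""
    base_confidence = 1.0 / (1.0 + cv * 2.0)                 # homogeneity term
    deviation_factor = 1.0 / (1.0 + center_deviation * 0.5)  # centrality term
    confidence = base_confidence * 0.7 + deviation_factor * 0.3
    return round(max(0.5, min(0.99, confidence)), 2)

for cv in (0.1, 0.5, 1.0):
    print(cv, roi_confidence(cv, center_deviation=0.0))
# -> 0.88, 0.65 and 0.53: homogeneous ROIs score high, noisy ones
#    drift toward the 0.5 floor imposed by the clamp.
```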
src/backend/gradio_medical_image_analyzer/medical_image_analyzer.pyi
CHANGED

@@ -488,16 +488,22 @@ class MedicalImageAnalyzer(Component):

        # How close is center value to mean
        center_deviation = abs(center_value - mean) / std

        # Coefficient of variation (normalized)
        cv = std / (abs(mean) + 1e-6)

        # Base confidence from homogeneity (sigmoid-like transformation);
        # combined with a centered ROI this yields ~0.88 at CV 0.1, ~0.65 at CV 0.5, ~0.53 at CV 1.0
        base_confidence = 1.0 / (1.0 + cv * 2.0)

        # Adjust based on center deviation
        # If center is close to mean, increase confidence
        deviation_factor = 1.0 / (1.0 + center_deviation * 0.5)

        # Combine factors
        confidence = base_confidence * 0.7 + deviation_factor * 0.3

        # Ensure reasonable minimum confidence for valid detections
        confidence = max(0.5, min(0.99, confidence))

        return round(confidence, 2)
src/demo/README.md
CHANGED

@@ -3,11 +3,30 @@

 This folder contains demo applications for the `gradio_medical_image_analyzer` custom component.
 
 ## ⚠️ IMPORTANT MEDICAL DISCLAIMER ⚠️
 
 **THIS SOFTWARE IS FOR RESEARCH AND EDUCATIONAL PURPOSES ONLY**
 
+🚨 **DO NOT USE FOR CLINICAL DIAGNOSIS OR MEDICAL DECISION MAKING** 🚨
+
+This component is in **EARLY DEVELOPMENT** and is intended as a **proof of concept** for medical image analysis integration with Gradio. The results produced by this software:
+
+- **ARE NOT** validated for clinical use
+- **ARE NOT** FDA approved or CE marked
+- **SHOULD NOT** be used for patient diagnosis or treatment decisions
+- **SHOULD NOT** replace professional medical judgment
+- **MAY CONTAIN** significant errors or inaccuracies
+- **ARE PROVIDED** without any warranty of accuracy or fitness for medical purposes
+
+**ALWAYS CONSULT QUALIFIED HEALTHCARE PROFESSIONALS** for medical image interpretation and clinical decisions. This software is intended solely for:
+- Research and development purposes
+- Educational demonstrations
+- Technical integration testing
+- Non-clinical experimental use
+
+By using this software, you acknowledge that you understand these limitations and agree not to use it for any clinical or medical diagnostic purposes.
 
 ## Demo Files

@@ -87,7 +106,7 @@ If you encounter issues:

 1. Ensure all dependencies are installed: `pip install -r requirements.txt`
 2. Check that you have the correct Python version (3.8+)
 3. For DICOM files, ensure they are valid medical images
-4. Report issues at: https://github.com/
+4. Report issues at: https://github.com/thedatadudech/gradio-medical-image-analyzer/issues
 
 ---
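For step 2 of that checklist, the interpreter can be checked directly; a minimal sketch (the 3.8 floor mirrors `requires-python` in `pyproject.toml`):

```python
import sys

# requires-python = ">=3.8" in pyproject.toml
assert sys.version_info >= (3, 8), f"Python 3.8+ required, found {sys.version.split()[0]}"
print("Python version OK:", sys.version.split()[0])
```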
src/demo/space.py
CHANGED

@@ -21,10 +21,10 @@ with gr.Blocks(

     # `gradio_medical_image_analyzer`
 
     <div style="display: flex; gap: 7px;">
-    <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0
+    <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.1.0%20-%20orange"> <a href="https://github.com/thedatadudech/gradio-medical-image-analyzer/issues" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/Issues-white?logo=github&logoColor=black"></a> <a href="https://huggingface.co/spaces/AbdullahIsaMarkus/gradio_medical_image_analyzer/discussions" target="_blank"><img alt="Static Badge" src="https://img.shields.io/badge/%F0%9F%A4%97%20Discuss-%23097EFF?style=flat&logoColor=black"></a>
     </div>
 
-    AI-agent optimized medical image analysis component for Gradio
+    AI-agent optimized medical image analysis component for Gradio with DICOM support
     """, elem_classes=["md-custom"], header_links=True)
     app.render()
     gr.Markdown(
src/pyproject.toml
CHANGED

@@ -8,10 +8,10 @@ build-backend = "hatchling.build"

 [project]
 name = "gradio_medical_image_analyzer"
-version = "0.0
+version = "0.1.0"
-description = "AI-agent optimized medical image analysis component for Gradio"
+description = "AI-agent optimized medical image analysis component for Gradio with DICOM support"
 readme = "README.md"
-license = "Apache-2.0"
+license = {text = "Apache-2.0"}
 requires-python = ">=3.8"
 authors = [{ name = "Markus Clauss Vetsuisse Uni Zurich", email = "markus@data-and-ai-dude.ch" }]
 keywords = [

@@ -19,7 +19,14 @@ keywords = [

     "medical-imaging",
     "ai-agents",
     "image-analysis",
-    "gradio-template-MedicalImageAnalyzer"
+    "gradio-template-MedicalImageAnalyzer",
+    "dicom",
+    "veterinary",
+    "ct-scan",
+    "x-ray",
+    "fat-segmentation",
+    "medical-ai",
+    "hackathon-2025"
 ]
 # Add dependencies here
 dependencies = [

@@ -40,8 +47,15 @@ classifiers = [

     'Programming Language :: Python :: 3.9',
     'Programming Language :: Python :: 3.10',
     'Programming Language :: Python :: 3.11',
+    'Programming Language :: Python :: 3.12',
     'Topic :: Scientific/Engineering :: Medical Science Apps.',
     'Topic :: Scientific/Engineering :: Artificial Intelligence',
+    'Topic :: Scientific/Engineering :: Image Processing',
+    'Topic :: Software Development :: Libraries :: Python Modules',
+    'Intended Audience :: Healthcare Industry',
+    'Intended Audience :: Science/Research',
+    'Intended Audience :: Developers',
+    'License :: OSI Approved :: Apache Software License',
 ]
 
 [project.urls]
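Once the package built from this `pyproject.toml` is installed, the bumped metadata is visible at runtime; a small sketch using only the standard library:

```python
from importlib.metadata import metadata, version

print(version("gradio_medical_image_analyzer"))             # expected: 0.1.0
print(metadata("gradio_medical_image_analyzer")["Summary"])
# expected: AI-agent optimized medical image analysis component for Gradio with DICOM support
```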