mmrech commited on
Commit
9fe9eb3
·
verified ·
1 Parent(s): 883dce8

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +288 -192
app.py CHANGED
@@ -1,29 +1,218 @@
 
 
 
 
 
1
  import gradio as gr
2
  import numpy as np
3
  from PIL import Image, ImageFilter, ImageEnhance, ImageOps
4
  from transformers import pipeline
 
 
 
 
 
5
  from segment_neuroimaging import (
6
  segment_nph, segment_ventricles, compute_evans_index,
7
  compute_callosal_angle, compute_temporal_horn_width,
8
  compute_third_ventricle_width, score_pvh, assess_desh,
9
  create_overlay, add_annotations, create_comparison,
10
- preprocess_image, create_roi_mask, Modality, COLORS
 
 
11
  )
12
- import cv2
13
- import json
14
- import tempfile
15
- import os
16
 
17
- # ---- Load models (cached on first use) ----
18
  classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
19
  detector = pipeline("object-detection", model="facebook/detr-resnet-50")
20
  segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
21
 
22
- # ---- Tab 1: Filters & Effects ----
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  def apply_filter(image, effect, intensity):
24
  if image is None:
25
  raise gr.Error("Please upload an image first.")
26
-
27
  img = Image.fromarray(image)
28
 
29
  if effect == "Grayscale":
@@ -60,26 +249,23 @@ def apply_filter(image, effect, intensity):
60
  filtered = enhancer.enhance(0.5 + intensity * 2)
61
  else:
62
  filtered = img
63
-
64
  return np.array(filtered)
65
 
66
 
67
- # ---- Tab 2: Image Classification ----
 
 
 
68
  def classify_image(image):
69
  if image is None:
70
  raise gr.Error("Please upload an image first.")
71
- img = Image.fromarray(image)
72
- results = classifier(img)
73
  return {r["label"]: r["score"] for r in results}
74
 
75
-
76
- # ---- Tab 3: Object Detection ----
77
  def detect_objects(image, threshold):
78
  if image is None:
79
  raise gr.Error("Please upload an image first.")
80
- img = Image.fromarray(image)
81
- results = detector(img, threshold=threshold)
82
-
83
  annotations = []
84
  for r in results:
85
  box = r["box"]
@@ -87,170 +273,70 @@ def detect_objects(image, threshold):
87
  (box["xmin"], box["ymin"], box["xmax"], box["ymax"]),
88
  f"{r['label']} ({r['score']:.0%})"
89
  ))
90
-
91
  return (image, annotations)
92
 
93
-
94
- # ---- Tab 4: General Segmentation ----
95
  def segment_image(image):
96
  if image is None:
97
  raise gr.Error("Please upload an image first.")
98
- img = Image.fromarray(image)
99
- results = segmenter(img)
100
-
101
  annotations = []
102
  for r in results:
103
  mask = np.array(r["mask"])
104
  annotations.append((mask, r["label"]))
105
-
106
  return (image, annotations)
107
 
108
 
109
- # ---- Tab 5: NPH Neuroimaging Analysis ----
110
- def analyze_nph(image, modality, overlay_alpha):
111
- if image is None:
112
- raise gr.Error("Please upload a brain MRI or CT image first.")
113
-
114
- # Save temp file for the segmentation pipeline
115
- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
116
- Image.fromarray(image).save(f.name)
117
- temp_path = f.name
118
 
119
- try:
120
- # Map display name to modality key
121
- modality_map = {
122
- "Axial FLAIR": "FLAIR",
123
- "Axial T1": "T1",
124
- "Axial T2": "T2",
125
- "Coronal T2": "T2",
126
- "Axial T2 FFE": "T2",
127
- "Sagittal T1": "T1",
128
- "CT Head": "CT_HEAD",
129
- }
130
- mod_key = modality_map.get(modality, "T1")
131
- is_coronal = "Coronal" in modality
132
-
133
- result = segment_nph(
134
- temp_path,
135
- modality=mod_key,
136
- compute_biomarkers=True,
137
- )
138
-
139
- # Build biomarker report
140
- meta = result.metadata
141
- report_lines = ["## NPH Biomarker Report\n"]
142
-
143
- # Evans' Index
144
- ei = meta.get("evans_index")
145
- if ei is not None:
146
- status = "**ABNORMAL (>0.3)**" if ei > 0.3 else "Normal"
147
- report_lines.append(f"**Evans' Index:** {ei:.3f} -- {status}")
148
-
149
- # Temporal Horn Width
150
- thw = meta.get("temporal_horn_width_px", 0)
151
- if thw > 0:
152
- report_lines.append(f"**Temporal Horn Width:** {thw} px")
153
-
154
- # Third Ventricle Width
155
- tvw = meta.get("third_ventricle_width_px", 0)
156
- if tvw > 0:
157
- report_lines.append(f"**Third Ventricle Width:** {tvw} px")
158
-
159
- # PVH Grade (FLAIR only)
160
- pvh = meta.get("pvh_grade")
161
- if pvh is not None:
162
- grade_desc = {0: "None", 1: "Pencil-thin rim", 2: "Smooth halo", 3: "Irregular, deep WM"}
163
- report_lines.append(f"**PVH Grade:** {pvh}/3 -- {grade_desc.get(pvh, '')}")
164
-
165
- # DESH Assessment
166
- desh = meta.get("is_desh_positive")
167
- if desh is not None:
168
- desh_str = "**POSITIVE**" if desh else "Negative"
169
- desh_score = meta.get("desh_total_score", "")
170
- report_lines.append(f"**DESH Pattern:** {desh_str} (score: {desh_score}/6)")
171
-
172
- # DESH details
173
- desh_details = meta.get("desh_details")
174
- if desh_details:
175
- report_lines.append(f"\n### DESH Components")
176
- report_lines.append(f"- Ventriculomegaly: {desh_details.get('ventriculomegaly_score', 'N/A')}/2")
177
- report_lines.append(f"- Sylvian dilation: {desh_details.get('sylvian_dilation_score', 'N/A')}/2")
178
- report_lines.append(f"- Convexity tightness: {desh_details.get('convexity_tightness_score', 'N/A')}/2")
179
-
180
- # Callosal angle for coronal images
181
- if is_coronal:
182
- vent_mask = result.masks.get("ventricles")
183
- if vent_mask is not None:
184
- ca_data = compute_callosal_angle(vent_mask)
185
- ca = ca_data.get("callosal_angle_deg")
186
- if ca is not None:
187
- ca_status = "Suggestive of NPH" if ca < 90 else ("Indeterminate" if ca < 120 else "Normal/ex vacuo")
188
- report_lines.append(f"\n**Callosal Angle:** {ca:.1f} deg -- {ca_status}")
189
-
190
- report_lines.append("\n---")
191
- report_lines.append("*Structures segmented:* " + ", ".join(meta.get("structures_found", [])))
192
- report_lines.append("\n*Note: Pixel-based measurements without DICOM spacing are approximate.*")
193
-
194
- report = "\n".join(report_lines)
195
-
196
- # Build overlay with custom alpha
197
- display_masks = {k: v for k, v in result.masks.items() if k != "skull"}
198
- img_rgb, gray, _ = preprocess_image(temp_path)
199
- overlay = create_overlay(img_rgb, display_masks, alpha=overlay_alpha)
200
-
201
- biomarkers_for_annotation = {
202
- k: v for k, v in meta.items()
203
- if k in ("evans_index", "callosal_angle_deg", "temporal_horn_width_px",
204
- "pvh_grade", "is_desh_positive")
205
- }
206
- annotated = add_annotations(overlay, display_masks, f"{modality} -- NPH Analysis", biomarkers_for_annotation)
207
-
208
- # Side-by-side comparison
209
- comparison = create_comparison(img_rgb, annotated, f"{modality} -- NPH Analysis")
210
-
211
- return annotated, comparison, report
212
-
213
- finally:
214
- os.unlink(temp_path)
215
-
216
-
217
- # ---- Build the UI ----
218
  css = """
219
  .main-title { text-align: center; margin-bottom: 0.5em; }
220
  .subtitle { text-align: center; color: #666; margin-top: 0; }
 
221
  """
222
 
223
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
224
  gr.Markdown("# Image Processing Studio", elem_classes="main-title")
225
  gr.Markdown(
226
- "Upload an image and explore filters, classification, object detection, segmentation, and **NPH neuroimaging analysis** -- all powered by state-of-the-art models.",
227
  elem_classes="subtitle"
228
  )
229
 
 
230
  with gr.Tab("NPH Analysis"):
231
  gr.Markdown(
232
- "### Normal Pressure Hydrocephalus -- Neuroimaging Segmentation & Biomarkers\n"
233
- "Upload a brain MRI or CT scan to compute Evans' index, DESH pattern, PVH scoring, "
234
- "callosal angle (coronal), and more."
 
 
235
  )
236
  with gr.Row():
237
- with gr.Column():
238
  nph_input = gr.Image(label="Upload Brain Scan", type="numpy")
239
  nph_modality = gr.Dropdown(
240
- choices=[
241
- "Axial FLAIR", "Axial T1", "Axial T2",
242
- "Coronal T2", "Axial T2 FFE", "Sagittal T1",
243
- "CT Head"
244
- ],
245
  value="Axial FLAIR",
246
  label="Modality / Sequence"
247
  )
 
 
 
 
248
  nph_alpha = gr.Slider(
249
  minimum=0.1, maximum=0.9, value=0.45, step=0.05,
250
  label="Overlay Opacity"
251
  )
252
- nph_btn = gr.Button("Analyze for NPH", variant="primary")
253
- with gr.Column():
 
 
 
 
 
 
254
  nph_overlay = gr.Image(label="Segmentation Overlay", type="numpy")
255
  nph_comparison = gr.Image(label="Side-by-Side Comparison", type="numpy")
256
 
@@ -258,48 +344,74 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
258
 
259
  nph_btn.click(
260
  fn=analyze_nph,
261
- inputs=[nph_input, nph_modality, nph_alpha],
262
  outputs=[nph_overlay, nph_comparison, nph_report]
263
  )
264
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
  gr.Markdown(
266
- "#### NPH Reference Values\n"
267
- "| Biomarker | Normal | Suggestive of NPH |\n"
268
- "|---|---|---|\n"
269
- "| Evans' Index | < 0.3 | > 0.3 |\n"
270
- "| Callosal Angle | > 120 deg | < 90 deg |\n"
271
- "| Temporal Horn | < 2 mm | > 5 mm |\n"
272
- "| PVH (FLAIR) | Grade 0 | Grade 2-3 |\n"
273
- "| DESH Pattern | Absent | Present |"
 
 
274
  )
275
 
 
 
 
 
 
 
 
 
 
 
 
 
 
276
  with gr.Tab("Filters & Effects"):
277
  with gr.Row():
278
  with gr.Column():
279
  filter_input = gr.Image(label="Upload Image", type="numpy")
280
  filter_effect = gr.Dropdown(
281
- choices=[
282
- "Grayscale", "Sepia", "Blur", "Sharpen",
283
- "Edge Detect", "Emboss", "Invert", "Posterize",
284
- "Brightness", "Contrast"
285
- ],
286
- value="Sepia",
287
- label="Effect"
288
- )
289
- filter_intensity = gr.Slider(
290
- minimum=0.0, maximum=1.0, value=0.7, step=0.05,
291
- label="Intensity"
292
  )
 
293
  filter_btn = gr.Button("Apply Filter", variant="primary")
294
  with gr.Column():
295
  filter_output = gr.Image(label="Result", type="numpy")
 
296
 
297
- filter_btn.click(
298
- fn=apply_filter,
299
- inputs=[filter_input, filter_effect, filter_intensity],
300
- outputs=filter_output
301
- )
302
-
303
  with gr.Tab("Image Classification"):
304
  with gr.Row():
305
  with gr.Column():
@@ -307,31 +419,20 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
307
  cls_btn = gr.Button("Classify", variant="primary")
308
  with gr.Column():
309
  cls_output = gr.Label(label="Predictions", num_top_classes=5)
 
310
 
311
- cls_btn.click(
312
- fn=classify_image,
313
- inputs=cls_input,
314
- outputs=cls_output
315
- )
316
-
317
  with gr.Tab("Object Detection"):
318
  with gr.Row():
319
  with gr.Column():
320
  det_input = gr.Image(label="Upload Image", type="numpy")
321
- det_threshold = gr.Slider(
322
- minimum=0.1, maximum=0.95, value=0.5, step=0.05,
323
- label="Confidence Threshold"
324
- )
325
  det_btn = gr.Button("Detect Objects", variant="primary")
326
  with gr.Column():
327
  det_output = gr.AnnotatedImage(label="Detections")
 
328
 
329
- det_btn.click(
330
- fn=detect_objects,
331
- inputs=[det_input, det_threshold],
332
- outputs=det_output
333
- )
334
-
335
  with gr.Tab("Segmentation"):
336
  with gr.Row():
337
  with gr.Column():
@@ -339,11 +440,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
339
  seg_btn = gr.Button("Segment", variant="primary")
340
  with gr.Column():
341
  seg_output = gr.AnnotatedImage(label="Segmentation Map")
342
-
343
- seg_btn.click(
344
- fn=segment_image,
345
- inputs=seg_input,
346
- outputs=seg_output
347
- )
348
 
349
  demo.launch()
 
1
+ """
2
+ Image Processing Studio + NPH Neuroimaging Analysis
3
+ Unified Gradio app with filters, ML models, and NPH-specific segmentation.
4
+ """
5
+
6
  import gradio as gr
7
  import numpy as np
8
  from PIL import Image, ImageFilter, ImageEnhance, ImageOps
9
  from transformers import pipeline
10
+ import cv2
11
+ import json
12
+ import tempfile
13
+ import os
14
+
15
  from segment_neuroimaging import (
16
  segment_nph, segment_ventricles, compute_evans_index,
17
  compute_callosal_angle, compute_temporal_horn_width,
18
  compute_third_ventricle_width, score_pvh, assess_desh,
19
  create_overlay, add_annotations, create_comparison,
20
+ preprocess_image, create_roi_mask, morphological_cleanup,
21
+ filter_by_area, Modality, VENTRICLE_THRESHOLDS, CSF_MODE,
22
+ CSFAppearance, COLORS
23
  )
 
 
 
 
24
 
25
+ # ---- Load ML models (cached on first use) ----
26
  classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
27
  detector = pipeline("object-detection", model="facebook/detr-resnet-50")
28
  segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
29
 
30
+
31
+ # ===========================================================================
32
+ # Tab 1: NPH Neuroimaging Analysis (Enhanced)
33
+ # ===========================================================================
34
+
35
+ def analyze_nph(image, modality, sensitivity, overlay_alpha, pixel_spacing_str):
36
+ if image is None:
37
+ raise gr.Error("Please upload a brain MRI or CT image first.")
38
+
39
+ # Save temp file for the pipeline
40
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
41
+ Image.fromarray(image).save(f.name)
42
+ temp_path = f.name
43
+
44
+ try:
45
+ modality_map = {
46
+ "Axial FLAIR": "FLAIR",
47
+ "Axial T1": "T1",
48
+ "Axial T2": "T2",
49
+ "Coronal T2": "T2",
50
+ "Axial T2 FFE": "T2",
51
+ "Sagittal T1": "T1",
52
+ "CT Head": "CT_HEAD",
53
+ }
54
+ mod_key = modality_map.get(modality, "T1")
55
+ mod = Modality[mod_key]
56
+ is_coronal = "Coronal" in modality
57
+
58
+ # Parse pixel spacing
59
+ pixel_spacing = None
60
+ if pixel_spacing_str and pixel_spacing_str.strip():
61
+ try:
62
+ pixel_spacing = float(pixel_spacing_str.strip())
63
+ except ValueError:
64
+ pass
65
+
66
+ # Load and preprocess
67
+ img_rgb, gray, _ = preprocess_image(temp_path)
68
+ h, w = gray.shape[:2]
69
+ blurred = cv2.GaussianBlur(gray, (5, 5), 0)
70
+ roi_mask = create_roi_mask(blurred, threshold=30)
71
+
72
+ # Sensitivity-adjusted thresholds
73
+ orig_thresh = dict(VENTRICLE_THRESHOLDS[mod])
74
+ sens_adj = (sensitivity - 50) / 50.0
75
+
76
+ custom_thresholds = dict(orig_thresh)
77
+ if CSF_MODE[mod] == CSFAppearance.DARK:
78
+ custom_thresholds["csf_high"] = max(20, min(120,
79
+ int(orig_thresh["csf_high"] + sens_adj * 30)))
80
+ else:
81
+ custom_thresholds["csf_low"] = max(100, min(220,
82
+ int(orig_thresh["csf_low"] - sens_adj * 30)))
83
+
84
+ # Segment ventricles
85
+ vent_mask = segment_ventricles(gray, mod, roi_mask, custom_thresholds=custom_thresholds)
86
+
87
+ # Default pixel spacing estimate for non-DICOM
88
+ if pixel_spacing is None:
89
+ pixel_spacing = round(180.0 / max(w, 256), 2)
90
+
91
+ # Compute all biomarkers
92
+ ei_data = compute_evans_index(vent_mask, image_width=w, pixel_spacing_mm=pixel_spacing)
93
+ th_data = compute_temporal_horn_width(vent_mask, pixel_spacing)
94
+ tv_data = compute_third_ventricle_width(vent_mask, pixel_spacing)
95
+ desh_data = assess_desh(vent_mask, gray, roi_mask, mod, pixel_spacing)
96
+
97
+ # PVH (FLAIR only)
98
+ pvh_data = None
99
+ if mod == Modality.FLAIR:
100
+ pvh_data = score_pvh(gray, vent_mask)
101
+
102
+ # Callosal angle
103
+ ca_data = compute_callosal_angle(vent_mask) if is_coronal else {}
104
+
105
+ # Ventricle stats
106
+ vent_area = int((vent_mask > 0).sum())
107
+ brain_area = int((roi_mask > 0).sum())
108
+ vent_brain_ratio = round(vent_area / brain_area, 4) if brain_area > 0 else 0
109
+
110
+ # Build display masks
111
+ display_masks = {"ventricles": vent_mask}
112
+ parenchyma = cv2.bitwise_and(roi_mask, cv2.bitwise_not(vent_mask))
113
+ display_masks["parenchyma"] = parenchyma
114
+
115
+ if pvh_data and mod == Modality.FLAIR:
116
+ display_masks["pvh"] = pvh_data["pvh_mask"]
117
+ if "sylvian_mask" in desh_data:
118
+ display_masks["sylvian_fissures"] = desh_data["sylvian_mask"]
119
+ if "convexity_mask" in desh_data:
120
+ display_masks["high_convexity_sulci"] = desh_data["convexity_mask"]
121
+
122
+ # Visualization
123
+ overlay = create_overlay(img_rgb, display_masks, alpha=overlay_alpha)
124
+
125
+ biomarkers_for_annotation = dict(ei_data)
126
+ biomarkers_for_annotation.update(th_data)
127
+ if pvh_data:
128
+ biomarkers_for_annotation["pvh_grade"] = pvh_data["pvh_grade"]
129
+ biomarkers_for_annotation["is_desh_positive"] = desh_data["is_desh_positive"]
130
+ if ca_data.get("callosal_angle_deg") is not None:
131
+ biomarkers_for_annotation["callosal_angle_deg"] = ca_data["callosal_angle_deg"]
132
+
133
+ annotated = add_annotations(
134
+ overlay, display_masks,
135
+ f"{modality} -- NPH Analysis",
136
+ biomarkers_for_annotation,
137
+ )
138
+
139
+ # Draw Evans' index measurement line
140
+ row = ei_data.get("measurement_row", 0)
141
+ if row > 0:
142
+ cols = np.where(vent_mask[row, :] > 0)[0]
143
+ if len(cols) > 0:
144
+ minX, maxX = int(cols[0]), int(cols[-1])
145
+ cv2.line(annotated, (minX, row), (maxX, row), (255, 220, 0), 2)
146
+ skull_d = ei_data.get("skull_diameter_px", w)
147
+ center_x = w // 2
148
+ half_skull = skull_d // 2
149
+ cv2.line(annotated, (center_x - half_skull, row + 8),
150
+ (center_x + half_skull, row + 8), (200, 200, 200), 1)
151
+
152
+ comparison = create_comparison(img_rgb, annotated, f"{modality} -- NPH Analysis")
153
+
154
+ # Build report
155
+ report_lines = ["## NPH Biomarker Report\n"]
156
+
157
+ ei = ei_data.get("evans_index", 0)
158
+ status = "**ABNORMAL (>0.3)**" if ei > 0.3 else "Normal"
159
+ report_lines.append(f"**Evans' Index:** {ei:.3f} -- {status}")
160
+
161
+ if ei_data.get("frontal_horn_width_mm"):
162
+ report_lines.append(f" - Frontal horn width: {ei_data['frontal_horn_width_mm']} mm")
163
+ report_lines.append(f" - Skull diameter: {ei_data['skull_diameter_mm']} mm")
164
+
165
+ thw = th_data.get("temporal_horn_width_px", 0)
166
+ if thw > 0:
167
+ thw_mm = th_data.get("temporal_horn_width_mm", "")
168
+ mm_str = f" ({thw_mm} mm)" if thw_mm else ""
169
+ report_lines.append(f"**Temporal Horn Width:** {thw} px{mm_str}")
170
+
171
+ tvw = tv_data.get("third_ventricle_width_px", 0)
172
+ if tvw > 0:
173
+ tvw_mm = tv_data.get("third_ventricle_width_mm", "")
174
+ mm_str = f" ({tvw_mm} mm)" if tvw_mm else ""
175
+ report_lines.append(f"**Third Ventricle Width:** {tvw} px{mm_str}")
176
+
177
+ report_lines.append(f"**Ventricle/Brain Ratio:** {vent_brain_ratio:.4f} ({vent_area} / {brain_area} px)")
178
+
179
+ if pvh_data:
180
+ grade_desc = {0: "None", 1: "Pencil-thin rim", 2: "Smooth halo", 3: "Irregular, deep WM"}
181
+ report_lines.append(f"**PVH Grade:** {pvh_data['pvh_grade']}/3 -- {grade_desc.get(pvh_data['pvh_grade'], '')}")
182
+ report_lines.append(f" - PVH ratio: {pvh_data['pvh_ratio']:.4f}")
183
+
184
+ if ca_data.get("callosal_angle_deg") is not None:
185
+ ca = ca_data["callosal_angle_deg"]
186
+ ca_status = "Suggestive of NPH" if ca < 90 else ("Indeterminate" if ca < 120 else "Normal/ex vacuo")
187
+ report_lines.append(f"**Callosal Angle:** {ca:.1f} deg -- {ca_status}")
188
+
189
+ desh = desh_data.get("is_desh_positive", False)
190
+ desh_str = "**POSITIVE**" if desh else "Negative"
191
+ desh_score = desh_data.get("total_score", 0)
192
+ report_lines.append(f"\n### DESH Assessment: {desh_str} (score: {desh_score}/6)")
193
+ report_lines.append(f"- Ventriculomegaly: {desh_data.get('ventriculomegaly_score', 'N/A')}/2")
194
+ report_lines.append(f"- Sylvian dilation: {desh_data.get('sylvian_dilation_score', 'N/A')}/2")
195
+ report_lines.append(f"- Convexity tightness: {desh_data.get('convexity_tightness_score', 'N/A')}/2")
196
+ scr = desh_data.get("sylvian_convexity_ratio", "N/A")
197
+ report_lines.append(f"- Sylvian/Convexity ratio: {scr}")
198
+
199
+ report_lines.append(f"\n---\n*Sensitivity: {sensitivity}% | Thresholds: CSF [{custom_thresholds['csf_low']}-{custom_thresholds['csf_high']}] | Pixel spacing: {pixel_spacing} mm/px*")
200
+ report_lines.append("*Structures:* " + ", ".join(display_masks.keys()))
201
+
202
+ report = "\n".join(report_lines)
203
+ return annotated, comparison, report
204
+
205
+ finally:
206
+ os.unlink(temp_path)
207
+
208
+
209
+ # ===========================================================================
210
+ # Tab 2: Filters & Effects
211
+ # ===========================================================================
212
+
213
  def apply_filter(image, effect, intensity):
214
  if image is None:
215
  raise gr.Error("Please upload an image first.")
 
216
  img = Image.fromarray(image)
217
 
218
  if effect == "Grayscale":
 
249
  filtered = enhancer.enhance(0.5 + intensity * 2)
250
  else:
251
  filtered = img
 
252
  return np.array(filtered)
253
 
254
 
255
+ # ===========================================================================
256
+ # Tab 3-5: ML Models
257
+ # ===========================================================================
258
+
259
def classify_image(image):
    """Classify the uploaded image with the ViT pipeline.

    Returns a {label: score} mapping suitable for a Gradio Label component.
    Raises gr.Error when no image has been uploaded.
    """
    if image is None:
        raise gr.Error("Please upload an image first.")
    pil_image = Image.fromarray(image)
    predictions = classifier(pil_image)
    scores = {}
    for prediction in predictions:
        scores[prediction["label"]] = prediction["score"]
    return scores
264
 
 
 
265
  def detect_objects(image, threshold):
266
  if image is None:
267
  raise gr.Error("Please upload an image first.")
268
+ results = detector(Image.fromarray(image), threshold=threshold)
 
 
269
  annotations = []
270
  for r in results:
271
  box = r["box"]
 
273
  (box["xmin"], box["ymin"], box["xmax"], box["ymax"]),
274
  f"{r['label']} ({r['score']:.0%})"
275
  ))
 
276
  return (image, annotations)
277
 
 
 
278
def segment_image(image):
    """Run panoptic segmentation on the uploaded image.

    Returns (image, [(mask, label), ...]) in the format expected by a
    Gradio AnnotatedImage component. Raises gr.Error when no image has
    been uploaded.
    """
    if image is None:
        raise gr.Error("Please upload an image first.")
    predictions = segmenter(Image.fromarray(image))
    annotations = [
        (np.array(prediction["mask"]), prediction["label"])
        for prediction in predictions
    ]
    return (image, annotations)
287
 
288
 
289
+ # ===========================================================================
290
+ # Build the UI
291
+ # ===========================================================================
 
 
 
 
 
 
292
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
  css = """
294
  .main-title { text-align: center; margin-bottom: 0.5em; }
295
  .subtitle { text-align: center; color: #666; margin-top: 0; }
296
+ .nph-ref-table th, .nph-ref-table td { padding: 4px 12px; }
297
  """
298
 
299
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
300
  gr.Markdown("# Image Processing Studio", elem_classes="main-title")
301
  gr.Markdown(
302
+ "Filters, classification, object detection, panoptic segmentation, and **NPH neuroimaging analysis** -- all in one place.",
303
  elem_classes="subtitle"
304
  )
305
 
306
+ # ── NPH Analysis Tab ──
307
  with gr.Tab("NPH Analysis"):
308
  gr.Markdown(
309
+ "### Normal Pressure Hydrocephalus -- Segmentation & Biomarkers\n"
310
+ "Upload a brain MRI or CT scan. Computes Evans' index, DESH pattern, temporal horn width, "
311
+ "callosal angle (coronal), PVH scoring (FLAIR), and ventricle/brain ratio.\n\n"
312
+ "**Sensitivity slider** adjusts the CSF thresholds -- increase to capture more ventricle, "
313
+ "decrease to be more conservative."
314
  )
315
  with gr.Row():
316
+ with gr.Column(scale=1):
317
  nph_input = gr.Image(label="Upload Brain Scan", type="numpy")
318
  nph_modality = gr.Dropdown(
319
+ choices=["Axial FLAIR", "Axial T1", "Axial T2", "Coronal T2",
320
+ "Axial T2 FFE", "Sagittal T1", "CT Head"],
 
 
 
321
  value="Axial FLAIR",
322
  label="Modality / Sequence"
323
  )
324
+ nph_sensitivity = gr.Slider(
325
+ minimum=10, maximum=90, value=50, step=5,
326
+ label="Sensitivity (%)"
327
+ )
328
  nph_alpha = gr.Slider(
329
  minimum=0.1, maximum=0.9, value=0.45, step=0.05,
330
  label="Overlay Opacity"
331
  )
332
+ nph_spacing = gr.Textbox(
333
+ label="Pixel Spacing (mm/px)",
334
+ placeholder="e.g. 0.5 (leave blank for auto-estimate)",
335
+ value=""
336
+ )
337
+ nph_btn = gr.Button("Analyze for NPH", variant="primary", size="lg")
338
+
339
+ with gr.Column(scale=2):
340
  nph_overlay = gr.Image(label="Segmentation Overlay", type="numpy")
341
  nph_comparison = gr.Image(label="Side-by-Side Comparison", type="numpy")
342
 
 
344
 
345
  nph_btn.click(
346
  fn=analyze_nph,
347
+ inputs=[nph_input, nph_modality, nph_sensitivity, nph_alpha, nph_spacing],
348
  outputs=[nph_overlay, nph_comparison, nph_report]
349
  )
350
 
351
+ with gr.Accordion("NPH Reference Values & Interpretation Guide", open=False):
352
+ gr.Markdown(
353
+ "| Biomarker | Normal | Suggestive of NPH | Strongly suggestive |\n"
354
+ "|---|---|---|---|\n"
355
+ "| Evans' Index | < 0.3 | > 0.3 | > 0.33 |\n"
356
+ "| Callosal Angle | > 120 deg | < 90 deg | < 60 deg |\n"
357
+ "| Temporal Horn | < 2 mm | 2-5 mm | > 5 mm |\n"
358
+ "| Third Ventricle | < 5 mm | 5-10 mm | > 10 mm |\n"
359
+ "| PVH (FLAIR) | Grade 0 | Grade 2 | Grade 3 |\n"
360
+ "| DESH Pattern | Absent | -- | Present |\n\n"
361
+ "**DESH** (Disproportionately Enlarged Subarachnoid-space Hydrocephalus): "
362
+ "Enlarged sylvian fissures + tight high-convexity sulci + ventriculomegaly. "
363
+ "This pattern distinguishes iNPH from Alzheimer's and normal aging.\n\n"
364
+ "**Color Legend:** "
365
+ "Blue = Ventricles | Green = Parenchyma | Yellow = PVH | "
366
+ "Purple = Sylvian fissures | Orange = High-convexity sulci\n\n"
367
+ "*Note: Measurements from JPEG/PNG images without DICOM metadata are approximate. "
368
+ "For clinical use, provide pixel spacing from the DICOM header.*"
369
+ )
370
+
371
+ # ── Client-Side NPH Detector Tab ──
372
+ with gr.Tab("NPH Detector (Browser)"):
373
  gr.Markdown(
374
+ "### Client-Side NPH Detector\n"
375
+ "This tab runs the full NPH segmentation pipeline **entirely in your browser** using JavaScript. "
376
+ "No data is sent to any server -- everything stays on your device.\n\n"
377
+ "Upload a brain scan below and select the modality."
378
+ )
379
+ gr.HTML(
380
+ value='<iframe src="https://mmrech-nph-detector-js.hf.space" '
381
+ 'width="100%" height="900" frameborder="0" '
382
+ 'allow="clipboard-write" '
383
+ 'style="border-radius: 12px; border: 1px solid #333;"></iframe>',
384
  )
385
 
386
+ # ── Video Demo Tab ──
387
+ with gr.Tab("Video Demo"):
388
+ gr.Markdown(
389
+ "### Whole-Brain Segmentation Demo\n"
390
+ "Watch a slice-by-slice ventricle segmentation across a full MRI series."
391
+ )
392
+ gr.Video(
393
+ value="examples/hydromorph_whole_brain_segmentation.mp4",
394
+ label="NPH Segmentation Video",
395
+ autoplay=False,
396
+ )
397
+
398
+ # ── Filters Tab ──
399
  with gr.Tab("Filters & Effects"):
400
  with gr.Row():
401
  with gr.Column():
402
  filter_input = gr.Image(label="Upload Image", type="numpy")
403
  filter_effect = gr.Dropdown(
404
+ choices=["Grayscale", "Sepia", "Blur", "Sharpen", "Edge Detect",
405
+ "Emboss", "Invert", "Posterize", "Brightness", "Contrast"],
406
+ value="Sepia", label="Effect"
 
 
 
 
 
 
 
 
407
  )
408
+ filter_intensity = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.05, label="Intensity")
409
  filter_btn = gr.Button("Apply Filter", variant="primary")
410
  with gr.Column():
411
  filter_output = gr.Image(label="Result", type="numpy")
412
+ filter_btn.click(fn=apply_filter, inputs=[filter_input, filter_effect, filter_intensity], outputs=filter_output)
413
 
414
+ # ── Classification Tab ──
 
 
 
 
 
415
  with gr.Tab("Image Classification"):
416
  with gr.Row():
417
  with gr.Column():
 
419
  cls_btn = gr.Button("Classify", variant="primary")
420
  with gr.Column():
421
  cls_output = gr.Label(label="Predictions", num_top_classes=5)
422
+ cls_btn.click(fn=classify_image, inputs=cls_input, outputs=cls_output)
423
 
424
+ # ── Object Detection Tab ──
 
 
 
 
 
425
  with gr.Tab("Object Detection"):
426
  with gr.Row():
427
  with gr.Column():
428
  det_input = gr.Image(label="Upload Image", type="numpy")
429
+ det_threshold = gr.Slider(minimum=0.1, maximum=0.95, value=0.5, step=0.05, label="Confidence Threshold")
 
 
 
430
  det_btn = gr.Button("Detect Objects", variant="primary")
431
  with gr.Column():
432
  det_output = gr.AnnotatedImage(label="Detections")
433
+ det_btn.click(fn=detect_objects, inputs=[det_input, det_threshold], outputs=det_output)
434
 
435
+ # ── Segmentation Tab ──
 
 
 
 
 
436
  with gr.Tab("Segmentation"):
437
  with gr.Row():
438
  with gr.Column():
 
440
  seg_btn = gr.Button("Segment", variant="primary")
441
  with gr.Column():
442
  seg_output = gr.AnnotatedImage(label="Segmentation Map")
443
+ seg_btn.click(fn=segment_image, inputs=seg_input, outputs=seg_output)
 
 
 
 
 
444
 
445
  demo.launch()