SmartHeal committed on
Commit 38f2e7c · verified · 1 Parent(s): 589d9fb

Update src/ai_processor.py

Files changed (1)
  1. src/ai_processor.py +103 -169
src/ai_processor.py CHANGED
@@ -232,117 +232,6 @@ initialize_cpu_models()
232
  setup_knowledge_base()
233
 
234
  # ---------- Calibration helpers ----------
235
- def _adaptive_prob_threshold(p: np.ndarray) -> float:
236
- """
237
- Pick a threshold that avoids tiny blobs while not swallowing skin.
238
- Strategy:
239
- - try Otsu on the prob map
240
- - clamp to a reasonable band [0.25, 0.65]
241
- - also consider percentile cut (p90) and take the "best" by area heuristic
242
- """
243
- p01 = np.clip(p.astype(np.float32), 0, 1)
244
- p255 = (p01 * 255).astype(np.uint8)
245
-
246
- # Otsu → use the returned scalar threshold (ret), NOT the image
247
- ret_otsu, _dst = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
248
- thr_otsu = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))
249
-
250
- # Percentile (90th)
251
- thr_pctl = float(np.clip(np.percentile(p01, 90), 0.25, 0.65))
252
-
253
- # Area fraction helper
254
- def area_frac(thr: float) -> float:
255
- return float((p01 >= thr).sum()) / float(p01.size)
256
-
257
- af_otsu = area_frac(thr_otsu)
258
- af_pctl = area_frac(thr_pctl)
259
-
260
- # Score: prefer ~3–10% coverage
261
- def score(af: float) -> float:
262
- target_low, target_high = 0.03, 0.10
263
- if af < target_low: return abs(af - target_low) * 3.0
264
- if af > target_high: return abs(af - target_high) * 1.5
265
- return 0.0
266
-
267
- return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
268
-
269
-
270
- # Score: closeness to a target area fraction (aim ~3–10%)
271
- def score(af):
272
- target_low, target_high = 0.03, 0.10
273
- if af < target_low: return abs(af - target_low) * 3.0
274
- if af > target_high: return abs(af - target_high) * 1.5
275
- return 0.0
276
-
277
- return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
278
-
279
-
280
- def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
281
- """
282
- Use OpenCV GrabCut to grow from a confident core into low-contrast margins.
283
- seed01: 1=probable FG core, 0=unknown/other
284
- """
285
- h, w = bgr.shape[:2]
286
- # Build GC mask: start with "unknown"
287
- gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
288
- # definite FG = dilated seed; probable FG = seed
289
- k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
290
- seed_dil = cv2.dilate(seed01, k, iterations=1)
291
- gc[seed01.astype(bool)] = cv2.GC_PR_FGD
292
- gc[seed_dil.astype(bool)] = cv2.GC_FGD
293
- # border is probable background
294
- gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
295
-
296
- bgdModel = np.zeros((1, 65), np.float64)
297
- fgdModel = np.zeros((1, 65), np.float64)
298
- cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
299
-
300
- # FG = definite or probable foreground
301
- mask01 = np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
302
- return mask01
303
-
304
-
305
- def _fill_holes(mask01: np.ndarray) -> np.ndarray:
306
- h, w = mask01.shape[:2]
307
- ff = np.zeros((h + 2, w + 2), np.uint8)
308
- m = (mask01 * 255).astype(np.uint8).copy()
309
- cv2.floodFill(m, ff, (0, 0), 255)
310
- m_inv = cv2.bitwise_not(m)
311
- out = ((mask01 * 255) | m_inv) // 255
312
- return out.astype(np.uint8)
313
-
314
-
315
- def _clean_mask(mask01: np.ndarray) -> np.ndarray:
316
- """Open → Close → Fill holes → Largest component → light smooth."""
317
- mask01 = (mask01 > 0).astype(np.uint8)
318
- k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
319
- k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
320
- mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
321
- mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=2)
322
- mask01 = _fill_holes(mask01)
323
-
324
- # keep largest component
325
- num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
326
- if num > 1:
327
- areas = stats[1:, cv2.CC_STAT_AREA]
328
- if areas.size:
329
- largest_idx = 1 + int(np.argmax(areas))
330
- mask01 = (labels == largest_idx).astype(np.uint8)
331
-
332
- # tiny masks → gentle grow (distance transform based)
333
- area = int(mask01.sum())
334
- if area > 0:
335
- grow = 1 if area < 2000 else 0
336
- if grow:
337
- k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
338
- mask01 = cv2.dilate(mask01, k, iterations=1)
339
-
340
- return (mask01 > 0).astype(np.uint8)
341
-
342
-
343
-
344
-
345
-
346
  def _exif_to_dict(pil_img: Image.Image) -> Dict[str, object]:
347
  out = {}
348
  try:
@@ -396,7 +285,6 @@ def estimate_px_per_cm_from_exif(pil_img: Image.Image, default_px_per_cm: float
396
 
397
  # ---------- Segmentation helpers ----------
398
  def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
399
- # expects RGB 0..255 -> float
400
  mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
401
  std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
402
  return (arr.astype(np.float32) - mean) / std
@@ -420,25 +308,80 @@ def _to_prob(pred: np.ndarray) -> np.ndarray:
420
  p = 1.0 / (1.0 + np.exp(-p))
421
  return p.astype(np.float32)
422
 
423
- # ---- Robust mask post-processing (for "proper" masking) ----
424
  def _fill_holes(mask01: np.ndarray) -> np.ndarray:
425
- # Flood-fill from border, then invert
426
  h, w = mask01.shape[:2]
427
  ff = np.zeros((h + 2, w + 2), np.uint8)
428
  m = (mask01 * 255).astype(np.uint8).copy()
429
  cv2.floodFill(m, ff, (0, 0), 255)
430
  m_inv = cv2.bitwise_not(m)
431
- # Combine original with filled holes
432
  out = ((mask01 * 255) | m_inv) // 255
433
  return out.astype(np.uint8)
434
 
435
- # Global last debug dict (per-process) to attach into results
436
  _last_seg_debug: Dict[str, object] = {}
437
 
438
  def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
439
  """
440
- TF model → adaptive threshold on prob → (optional) GrabCut grow → cleanup.
441
- Falls back to KMeans-Lab when model missing/fails.
442
  Returns (mask_uint8_0_255, debug_dict)
443
  """
444
  debug = {"used": None, "reason": None, "positive_fraction": 0.0,
@@ -454,21 +397,17 @@ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndar
454
  raise ValueError(f"Bad seg input_shape: {ishape}")
455
  th, tw = int(ishape[1]), int(ishape[2])
456
 
457
- # preprocess
458
  x = _preprocess_for_seg(image_bgr, (th, tw))
459
- rgb_for_view = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
460
  roi_seen_path = None
461
  if SMARTHEAL_DEBUG:
462
  roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
463
- cv2.imwrite(roi_seen_path, cv2.cvtColor(rgb_for_view, cv2.COLOR_RGB2BGR))
464
 
465
- # predict → prob map back to ROI size
466
  pred = seg_model.predict(x, verbose=0)
467
  if isinstance(pred, (list, tuple)): pred = pred[0]
468
  p = _to_prob(pred)
469
  p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
470
 
471
- # visualization (optional)
472
  heatmap_path = None
473
  if SMARTHEAL_DEBUG:
474
  hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
@@ -476,19 +415,16 @@ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndar
476
  heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
477
  cv2.imwrite(heatmap_path, heat)
478
 
479
- # --- Adaptive threshold ---
480
  thr = _adaptive_prob_threshold(p)
481
  core01 = (p >= thr).astype(np.uint8)
482
  core_frac = float(core01.sum()) / float(core01.size)
483
 
484
- # If still too tiny, try a gentler threshold
485
  if core_frac < 0.005:
486
  thr2 = max(thr - 0.10, 0.15)
487
  core01 = (p >= thr2).astype(np.uint8)
488
  thr = thr2
489
  core_frac = float(core01.sum()) / float(core01.size)
490
 
491
- # --- Grow with GrabCut (only if some core exists) ---
492
  if core01.any():
493
  gc01 = _grabcut_refine(image_bgr, core01, iters=3)
494
  mask01 = _clean_mask(gc01)
@@ -496,13 +432,13 @@ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndar
496
  mask01 = np.zeros(core01.shape, np.uint8)
497
 
498
  pos_frac = float(mask01.sum()) / float(mask01.size)
499
- logging.info(f"SegModel USED | thr={thr:.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
500
 
501
  debug.update({
502
  "used": "tf_model",
503
  "reason": "ok",
504
  "positive_fraction": pos_frac,
505
- "thr": thr,
506
  "heatmap_path": heatmap_path,
507
  "roi_seen_by_model": roi_seen_path
508
  })
@@ -533,7 +469,6 @@ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndar
533
  })
534
  return (mask01 * 255).astype(np.uint8), debug
535
 
536
-
537
  # ---------- Measurement + overlay helpers ----------
538
  def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
539
  num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
@@ -545,17 +480,6 @@ def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.nd
545
  largest_idx = 1 + int(np.argmax(areas))
546
  return (labels == largest_idx).astype(np.uint8)
547
 
548
- def _clean_mask(mask01: np.ndarray) -> np.ndarray:
549
- """Open→Close→Fill holes→Largest component."""
550
- if mask01.dtype != np.uint8:
551
- mask01 = mask01.astype(np.uint8)
552
- k = np.ones((3, 3), np.uint8)
553
- mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k, iterations=1)
554
- mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k, iterations=2)
555
- mask01 = _fill_holes(mask01)
556
- mask01 = largest_component_mask(mask01, min_area_px=30)
557
- return (mask01 > 0).astype(np.uint8)
558
-
559
  def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
560
  contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
561
  if not contours:
@@ -569,9 +493,23 @@ def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float,
569
  box = cv2.boxPoints(rect).astype(int)
570
  return length_cm, breadth_cm, (box, rect[0])
571
 
572
- def count_area_cm2(mask01: np.ndarray, px_per_cm: float) -> float:
573
- px_count = float(mask01.astype(bool).sum())
574
- return round(px_count / (max(px_per_cm, 1e-6) ** 2), 2)
575
 
576
  def draw_measurement_overlay(
577
  base_bgr: np.ndarray,
@@ -582,16 +520,13 @@ def draw_measurement_overlay(
582
  thickness: int = 2
583
  ) -> np.ndarray:
584
  """
585
- Draws:
586
- 1) Strong red mask overlay with white contour.
587
- 2) Min-area rectangle.
588
- 3) Two double-headed arrows:
589
- - 'Length' along the longer side.
590
- - 'Width' along the shorter side.
591
  """
592
  overlay = base_bgr.copy()
593
 
594
- # --- Strong overlay from mask (tinted red where mask==1) ---
595
  mask255 = (mask01 * 255).astype(np.uint8)
596
  mask3 = cv2.merge([mask255, mask255, mask255])
597
  red = np.zeros_like(overlay); red[:] = (0, 0, 255)
@@ -599,7 +534,7 @@ def draw_measurement_overlay(
599
  tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
600
  overlay = np.where(mask3 > 0, tinted, overlay)
601
 
602
- # Draw wound contour
603
  cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
604
  if cnts:
605
  cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
@@ -608,19 +543,11 @@ def draw_measurement_overlay(
608
  cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
609
  pts = rect_box.reshape(-1, 2)
610
 
611
- def midpoint(a, b):
612
- return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
613
-
614
- # Edge lengths
615
  e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
616
  long_edge_idx = int(np.argmax(e))
617
- short_edge_idx = (long_edge_idx + 1) % 2 # 0/1 map for pairs below
618
-
619
- # Midpoints of opposite edges for arrows
620
  mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
621
- # Long side uses edges long_edge_idx and the opposite edge (i+2)
622
  long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
623
- # Short side uses the other pair
624
  short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
625
 
626
  def draw_double_arrow(img, p1, p2):
@@ -634,7 +561,6 @@ def draw_measurement_overlay(
634
  cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
635
  cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
636
 
637
- # Draw arrows and labels
638
  draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
639
  draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
640
  put_label(f"Length: {length_cm:.2f} cm", mids[long_pair[0]])
@@ -663,6 +589,11 @@ class AIProcessor:
663
  """
664
  try:
665
  px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
666
  image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)
667
 
668
  # --- Detection ---
@@ -696,20 +627,23 @@ class AIProcessor:
696
  mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
697
  mask01 = (mask_u8_255 > 127).astype(np.uint8)
698
 
699
- # Robust post-processing to ensure "proper" masking
700
  if mask01.any():
701
  mask01 = _clean_mask(mask01)
702
  logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
703
 
704
- # --- Measurement ---
705
  if mask01.any():
706
  length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
707
- surface_area_cm2 = count_area_cm2(mask01, px_per_cm)
708
709
  anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
710
  segmentation_empty = False
711
  else:
712
- # Graceful fallback if seg failed: use ROI box as bounds
713
  h_px = max(0, y2 - y1); w_px = max(0, x2 - x1)
714
  length_cm = round(max(h_px, w_px) / px_per_cm, 2)
715
  breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
@@ -733,7 +667,7 @@ class AIProcessor:
733
  roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
734
  cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
735
 
736
- # ROI overlay (clear mask w/ white contour, no arrows)
737
  mask255 = (mask01 * 255).astype(np.uint8)
738
  mask3 = cv2.merge([mask255, mask255, mask255])
739
  red = np.zeros_like(roi); red[:] = (0, 0, 255)
@@ -776,7 +710,7 @@ class AIProcessor:
776
  "seg_used": seg_debug.get("used"),
777
  "seg_reason": seg_debug.get("reason"),
778
  "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
779
- "threshold": seg_debug.get("threshold", SEG_THRESH),
780
  "segmentation_empty": segmentation_empty,
781
  "exif_px_per_cm": round(px_per_cm, 3),
782
  }
@@ -983,4 +917,4 @@ Automated analysis provides quantitative measurements; verify via clinical exami
983
  "report": f"Analysis initialization failed: {str(e)}",
984
  "saved_image_path": None,
985
  "guideline_context": "",
986
- }
 
232
  setup_knowledge_base()
233
 
234
  # ---------- Calibration helpers ----------
235
  def _exif_to_dict(pil_img: Image.Image) -> Dict[str, object]:
236
  out = {}
237
  try:
 
285
 
286
  # ---------- Segmentation helpers ----------
287
  def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
 
288
  mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
289
  std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
290
  return (arr.astype(np.float32) - mean) / std
 
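These constants are the standard ImageNet RGB statistics expressed on a 0–255 scale (mean/255 ≈ [0.485, 0.456, 0.406], std/255 ≈ [0.229, 0.224, 0.225]). A quick standalone check, assuming only NumPy:

    import numpy as np

    mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
    std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
    rgb = np.full((2, 2, 3), 128, np.uint8)    # mid-grey RGB patch
    x = (rgb.astype(np.float32) - mean) / std
    print(x[0, 0])                             # approx [0.07, 0.21, 0.43]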
308
  p = 1.0 / (1.0 + np.exp(-p))
309
  return p.astype(np.float32)
310
 
311
+ # ---- Adaptive threshold + GrabCut grow ----
312
+ def _adaptive_prob_threshold(p: np.ndarray) -> float:
313
+ """
314
+ Choose a threshold that avoids tiny blobs while not swallowing skin.
315
+ Try Otsu and the 90th percentile, clamp to [0.25, 0.65], pick by area heuristic.
316
+ """
317
+ p01 = np.clip(p.astype(np.float32), 0, 1)
318
+ p255 = (p01 * 255).astype(np.uint8)
319
+
320
+ ret_otsu, _ = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
321
+ thr_otsu = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))
322
+ thr_pctl = float(np.clip(np.percentile(p01, 90), 0.25, 0.65))
323
+
324
+ def area_frac(thr: float) -> float:
325
+ return float((p01 >= thr).sum()) / float(p01.size)
326
+
327
+ af_otsu = area_frac(thr_otsu)
328
+ af_pctl = area_frac(thr_pctl)
329
+
330
+ def score(af: float) -> float:
331
+ target_low, target_high = 0.03, 0.10
332
+ if af < target_low: return abs(af - target_low) * 3.0
333
+ if af > target_high: return abs(af - target_high) * 1.5
334
+ return 0.0
335
+
336
+ return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
337
+
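For intuition on the Otsu step above: cv2.threshold with THRESH_OTSU returns the chosen cut as its first return value, which is then rescaled to [0, 1] and clamped. A standalone sketch on a synthetic probability map (the blob and the clamp band are illustrative only):

    import numpy as np
    import cv2

    # Synthetic probability map: one soft blob on a dark background
    yy, xx = np.mgrid[0:200, 0:200]
    p = np.exp(-((yy - 100) ** 2 + (xx - 100) ** 2) / (2 * 30.0 ** 2)).astype(np.float32)

    p255 = (np.clip(p, 0, 1) * 255).astype(np.uint8)
    ret_otsu, _ = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    thr = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))   # clamp to the same band
    print(f"cut={thr:.2f}, area fraction={(p >= thr).mean():.3f}")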
338
+ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
339
+ """Grow from a confident core into low-contrast margins."""
340
+ h, w = bgr.shape[:2]
341
+ gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
342
+ k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
343
+ seed_dil = cv2.dilate(seed01, k, iterations=1)
344
+ gc[seed01.astype(bool)] = cv2.GC_PR_FGD
345
+ gc[seed_dil.astype(bool)] = cv2.GC_FGD
346
+ gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
347
+ bgdModel = np.zeros((1, 65), np.float64)
348
+ fgdModel = np.zeros((1, 65), np.float64)
349
+ cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
350
+ return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
351
+
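The mask-initialised GrabCut call can be exercised on a toy scene; everything below (image, seed, sizes) is made up for illustration:

    import numpy as np
    import cv2

    rng = np.random.default_rng(0)
    bgr = np.full((120, 120, 3), 40, np.uint8)
    cv2.rectangle(bgr, (40, 40), (80, 80), (60, 60, 180), -1)                 # "wound" patch
    bgr = cv2.add(bgr, rng.integers(0, 15, bgr.shape).astype(np.uint8))       # avoid flat colours

    seed01 = np.zeros((120, 120), np.uint8)
    cv2.rectangle(seed01, (50, 50), (70, 70), 1, -1)                          # confident core

    gc = np.full(bgr.shape[:2], cv2.GC_PR_BGD, np.uint8)
    gc[seed01.astype(bool)] = cv2.GC_FGD
    gc[0, :] = gc[-1, :] = gc[:, 0] = gc[:, -1] = cv2.GC_BGD
    bgd, fgd = np.zeros((1, 65), np.float64), np.zeros((1, 65), np.float64)
    cv2.grabCut(bgr, gc, None, bgd, fgd, 3, cv2.GC_INIT_WITH_MASK)
    mask01 = ((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD)).astype(np.uint8)
    print("foreground pixels:", int(mask01.sum()))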
352
  def _fill_holes(mask01: np.ndarray) -> np.ndarray:
 
353
  h, w = mask01.shape[:2]
354
  ff = np.zeros((h + 2, w + 2), np.uint8)
355
  m = (mask01 * 255).astype(np.uint8).copy()
356
  cv2.floodFill(m, ff, (0, 0), 255)
357
  m_inv = cv2.bitwise_not(m)
 
358
  out = ((mask01 * 255) | m_inv) // 255
359
  return out.astype(np.uint8)
360
 
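The flood-fill trick in _fill_holes (fill the background from the border, invert, OR with the original) can be verified on a ring-shaped toy mask:

    import numpy as np
    import cv2

    ring = np.zeros((100, 100), np.uint8)
    cv2.circle(ring, (50, 50), 30, 1, -1)
    cv2.circle(ring, (50, 50), 12, 0, -1)        # punch a hole

    m = (ring * 255).copy()
    ff = np.zeros((102, 102), np.uint8)          # floodFill wants a (h+2, w+2) scratch mask
    cv2.floodFill(m, ff, (0, 0), 255)            # fill everything reachable from the border
    filled01 = ((ring * 255) | cv2.bitwise_not(m)) // 255
    print(int(ring.sum()), "->", int(filled01.sum()))   # the hole is now included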
361
+ def _clean_mask(mask01: np.ndarray) -> np.ndarray:
362
+ """Open → Close → Fill holes → Largest component (no dilation)."""
363
+ mask01 = (mask01 > 0).astype(np.uint8)
364
+ k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
365
+ k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
366
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
367
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=1)
368
+ mask01 = _fill_holes(mask01)
369
+ # Keep largest component only
370
+ num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
371
+ if num > 1:
372
+ areas = stats[1:, cv2.CC_STAT_AREA]
373
+ if areas.size:
374
+ largest_idx = 1 + int(np.argmax(areas))
375
+ mask01 = (labels == largest_idx).astype(np.uint8)
376
+ return (mask01 > 0).astype(np.uint8)
377
+
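The open/close plus largest-connected-component chain can be sanity-checked on a toy mask with a speck and a pinhole (all names below are local to this sketch):

    import numpy as np
    import cv2

    mask01 = np.zeros((80, 80), np.uint8)
    cv2.circle(mask01, (30, 30), 15, 1, -1)      # main blob
    cv2.circle(mask01, (65, 65), 2, 1, -1)       # spurious speck
    mask01[30, 30] = 0                           # one-pixel hole

    k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    clean = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3)
    clean = cv2.morphologyEx(clean, cv2.MORPH_CLOSE, k3)
    num, labels, stats, _ = cv2.connectedComponentsWithStats(clean, connectivity=8)
    if num > 1:
        largest = 1 + int(np.argmax(stats[1:, cv2.CC_STAT_AREA]))
        clean = (labels == largest).astype(np.uint8)
    print("pixels kept:", int(clean.sum()))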
378
+ # Global last debug dict (per-process)
379
  _last_seg_debug: Dict[str, object] = {}
380
 
381
  def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
382
  """
383
+ TF model → adaptive threshold on prob → GrabCut grow → cleanup.
384
+ Fallback: KMeans-Lab.
385
  Returns (mask_uint8_0_255, debug_dict)
386
  """
387
  debug = {"used": None, "reason": None, "positive_fraction": 0.0,
 
397
  raise ValueError(f"Bad seg input_shape: {ishape}")
398
  th, tw = int(ishape[1]), int(ishape[2])
399
 
 
400
  x = _preprocess_for_seg(image_bgr, (th, tw))
 
401
  roi_seen_path = None
402
  if SMARTHEAL_DEBUG:
403
  roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
404
+ cv2.imwrite(roi_seen_path, image_bgr)
405
 
 
406
  pred = seg_model.predict(x, verbose=0)
407
  if isinstance(pred, (list, tuple)): pred = pred[0]
408
  p = _to_prob(pred)
409
  p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
410
 
 
411
  heatmap_path = None
412
  if SMARTHEAL_DEBUG:
413
  hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
 
415
  heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
416
  cv2.imwrite(heatmap_path, heat)
417
 
 
418
  thr = _adaptive_prob_threshold(p)
419
  core01 = (p >= thr).astype(np.uint8)
420
  core_frac = float(core01.sum()) / float(core01.size)
421
 
 
422
  if core_frac < 0.005:
423
  thr2 = max(thr - 0.10, 0.15)
424
  core01 = (p >= thr2).astype(np.uint8)
425
  thr = thr2
426
  core_frac = float(core01.sum()) / float(core01.size)
427
 
 
428
  if core01.any():
429
  gc01 = _grabcut_refine(image_bgr, core01, iters=3)
430
  mask01 = _clean_mask(gc01)
 
432
  mask01 = np.zeros(core01.shape, np.uint8)
433
 
434
  pos_frac = float(mask01.sum()) / float(mask01.size)
435
+ logging.info(f"SegModel USED | thr={float(thr):.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
436
 
437
  debug.update({
438
  "used": "tf_model",
439
  "reason": "ok",
440
  "positive_fraction": pos_frac,
441
+ "thr": float(thr),
442
  "heatmap_path": heatmap_path,
443
  "roi_seen_by_model": roi_seen_path
444
  })
 
469
  })
470
  return (mask01 * 255).astype(np.uint8), debug
471
 
 
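The KMeans-Lab fallback referred to in the docstring sits outside this hunk; as a rough illustration of that general idea (hypothetical, not the code this file actually uses), clustering in Lab space and keeping the reddest cluster might look like:

    import numpy as np
    import cv2

    def kmeans_lab_mask(bgr: np.ndarray, k: int = 2) -> np.ndarray:
        """Cluster pixels in Lab space; keep the cluster with the highest mean a* (redness)."""
        lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB).reshape(-1, 3).astype(np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
        _, labels, centers = cv2.kmeans(lab, k, None, criteria, 3, cv2.KMEANS_PP_CENTERS)
        wound_cluster = int(np.argmax(centers[:, 1]))
        return (labels.reshape(bgr.shape[:2]) == wound_cluster).astype(np.uint8)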
472
  # ---------- Measurement + overlay helpers ----------
473
  def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
474
  num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
 
480
  largest_idx = 1 + int(np.argmax(areas))
481
  return (labels == largest_idx).astype(np.uint8)
482
 
483
  def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
484
  contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
485
  if not contours:
 
493
  box = cv2.boxPoints(rect).astype(int)
494
  return length_cm, breadth_cm, (box, rect[0])
495
 
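A standalone check of the length/breadth conversion around cv2.minAreaRect (the calibration value and the ellipse are made up):

    import numpy as np
    import cv2

    px_per_cm = 50.0
    mask01 = np.zeros((200, 200), np.uint8)
    cv2.ellipse(mask01, (100, 100), (60, 25), 30, 0, 360, 1, -1)

    cnts, _ = cv2.findContours(mask01, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    (w_px, h_px) = cv2.minAreaRect(max(cnts, key=cv2.contourArea))[1]
    length_cm = round(max(w_px, h_px) / px_per_cm, 2)
    breadth_cm = round(min(w_px, h_px) / px_per_cm, 2)
    print(length_cm, "x", breadth_cm, "cm")      # roughly 2.4 x 1.0 cm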
496
+ def area_cm2_from_contour(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, Optional[np.ndarray]]:
497
+ """Area from largest polygon (sub-pixel); returns (area_cm2, contour)."""
498
+ m = (mask01 > 0).astype(np.uint8)
499
+ contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
500
+ if not contours:
501
+ return 0.0, None
502
+ cnt = max(contours, key=cv2.contourArea)
503
+ poly_area_px2 = float(cv2.contourArea(cnt))
504
+ area_cm2 = round(poly_area_px2 / (max(px_per_cm, 1e-6) ** 2), 2)
505
+ return area_cm2, cnt
506
+
507
+ def clamp_area_with_minrect(cnt: np.ndarray, px_per_cm: float, area_cm2_poly: float) -> float:
508
+ rect = cv2.minAreaRect(cnt)
509
+ (w_px, h_px) = rect[1]
510
+ rect_area_px2 = float(max(w_px, 0.0) * max(h_px, 0.0))
511
+ rect_area_cm2 = rect_area_px2 / (max(px_per_cm, 1e-6) ** 2)
512
+ return round(min(area_cm2_poly, rect_area_cm2 * 1.05), 2)
513
 
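A quick numeric check of the polygon-area vs. min-rect clamp on a toy circle (calibration value illustrative): the polygon area (~pi*r^2) is kept because it stays below 1.05x the rectangle area (~(2r)^2).

    import numpy as np
    import cv2

    px_per_cm = 50.0
    mask01 = np.zeros((200, 200), np.uint8)
    cv2.circle(mask01, (100, 100), 40, 1, -1)

    cnts, _ = cv2.findContours(mask01, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = max(cnts, key=cv2.contourArea)
    poly_cm2 = cv2.contourArea(cnt) / px_per_cm ** 2      # close to pi*0.8^2 ~ 2.0 cm^2
    w_px, h_px = cv2.minAreaRect(cnt)[1]
    rect_cm2 = (w_px * h_px) / px_per_cm ** 2             # close to 1.6^2 ~ 2.6 cm^2
    print(round(min(poly_cm2, rect_cm2 * 1.05), 2))       # the smaller, polygon-based value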
514
  def draw_measurement_overlay(
515
  base_bgr: np.ndarray,
 
520
  thickness: int = 2
521
  ) -> np.ndarray:
522
  """
523
+ 1) Strong red mask overlay + white contour
524
+ 2) Min-area rectangle
525
+ 3) Double-headed arrows labeled Length/Width
526
  """
527
  overlay = base_bgr.copy()
528
 
529
+ # Mask tint
530
  mask255 = (mask01 * 255).astype(np.uint8)
531
  mask3 = cv2.merge([mask255, mask255, mask255])
532
  red = np.zeros_like(overlay); red[:] = (0, 0, 255)
 
534
  tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
535
  overlay = np.where(mask3 > 0, tinted, overlay)
536
 
537
+ # Contour
538
  cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
539
  if cnts:
540
  cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
 
543
  cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
544
  pts = rect_box.reshape(-1, 2)
545
 
546
+ def midpoint(a, b): return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
547
  e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
548
  long_edge_idx = int(np.argmax(e))
 
 
 
549
  mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
 
550
  long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
 
551
  short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
552
 
553
  def draw_double_arrow(img, p1, p2):
 
561
  cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
562
  cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
563
 
 
564
  draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
565
  draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
566
  put_label(f"Length: {length_cm:.2f} cm", mids[long_pair[0]])
 
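draw_double_arrow's body is not shown in this hunk; a double-headed arrow in OpenCV is typically two arrowedLine calls, one per direction. A minimal standalone sketch (colours, sizes and the label text are invented):

    import numpy as np
    import cv2

    def draw_double_arrow(img, p1, p2, color=(0, 255, 0), thickness=2):
        cv2.arrowedLine(img, p1, p2, color, thickness, tipLength=0.05)
        cv2.arrowedLine(img, p2, p1, color, thickness, tipLength=0.05)

    canvas = np.zeros((120, 240, 3), np.uint8)
    draw_double_arrow(canvas, (20, 60), (220, 60))
    cv2.putText(canvas, "Length: 4.00 cm", (30, 40), cv2.FONT_HERSHEY_SIMPLEX,
                0.6, (255, 255, 255), 2, cv2.LINE_AA)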
589
  """
590
  try:
591
  px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
592
+ # Guardrails for calibration to avoid huge area blow-ups
593
+ px_per_cm = float(np.clip(px_per_cm, 20.0, 350.0))
594
+ if (exif_meta or {}).get("used") != "exif":
595
+ logging.warning(f"Calibration fallback used: px_per_cm={px_per_cm:.2f} (default). Prefer ruler/Aruco for accuracy.")
596
+
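Why the clamp and the warning matter: reported area scales with 1/px_per_cm^2, so a bad calibration is squared into the result. A tiny worked example with an arbitrary 5000-pixel mask:

    px_count = 5000.0
    for px_per_cm in (20.0, 350.0):              # the clamp band used above
        print(f"px_per_cm={px_per_cm:5.0f} -> area = {px_count / px_per_cm ** 2:.2f} cm^2")
    # 20 px/cm  -> 12.50 cm^2
    # 350 px/cm ->  0.04 cm^2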
597
  image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)
598
 
599
  # --- Detection ---
 
627
  mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
628
  mask01 = (mask_u8_255 > 127).astype(np.uint8)
629
 
 
630
  if mask01.any():
631
  mask01 = _clean_mask(mask01)
632
  logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
633
 
634
+ # --- Measurement (accurate & conservative) ---
635
  if mask01.any():
636
  length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
637
+ area_poly_cm2, largest_cnt = area_cm2_from_contour(mask01, px_per_cm)
638
+ if largest_cnt is not None:
639
+ surface_area_cm2 = clamp_area_with_minrect(largest_cnt, px_per_cm, area_poly_cm2)
640
+ else:
641
+ surface_area_cm2 = area_poly_cm2
642
+
643
  anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
644
  segmentation_empty = False
645
  else:
646
+ # Fallback if seg failed: use ROI dimensions
647
  h_px = max(0, y2 - y1); w_px = max(0, x2 - x1)
648
  length_cm = round(max(h_px, w_px) / px_per_cm, 2)
649
  breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
 
667
  roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
668
  cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
669
 
670
+ # ROI overlay (mask tint + contour, without arrows)
671
  mask255 = (mask01 * 255).astype(np.uint8)
672
  mask3 = cv2.merge([mask255, mask255, mask255])
673
  red = np.zeros_like(roi); red[:] = (0, 0, 255)
 
710
  "seg_used": seg_debug.get("used"),
711
  "seg_reason": seg_debug.get("reason"),
712
  "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
713
+ "threshold": seg_debug.get("thr"),
714
  "segmentation_empty": segmentation_empty,
715
  "exif_px_per_cm": round(px_per_cm, 3),
716
  }
 
917
  "report": f"Analysis initialization failed: {str(e)}",
918
  "saved_image_path": None,
919
  "guideline_context": "",
920
+ }