SmartHeal committed on
Commit 271aabd · verified · 1 Parent(s): f667942

Update app.py

Files changed (1)
  1. app.py +899 -43
app.py CHANGED
@@ -1,56 +1,912 @@
1
- #!/usr/bin/env python3
2
 
3
  import os
 
4
  import logging
5
- import traceback
6
- import gradio as gr
7
- import spaces
8
 
9
- # Import internal modules
10
- from src.config import Config
11
- from src.database import DatabaseManager
12
- from src.auth import AuthManager
13
- from src.ai_processor import AIProcessor
14
- from src.ui_components_original import UIComponents
15
 
16
- # Logging setup
17
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
18
 
19
- class SmartHealApp:
20
- def __init__(self):
21
- self.ui_components = None
22
- try:
23
- self.config = Config()
24
- self.database_manager = DatabaseManager(self.config.get_mysql_config())
25
- self.auth_manager = AuthManager(self.database_manager)
26
- self.ai_processor = AIProcessor()
27
- self.ui_components = UIComponents(
28
- self.auth_manager,
29
- self.database_manager,
30
- self.ai_processor
31
- )
32
- logging.info("✅ SmartHeal App initialized successfully.")
33
- except Exception as e:
34
- logging.error(f"Initialization error: {e}")
35
- traceback.print_exc()
36
- raise
37
 
38
- def launch(self, port=7860, share=True):
39
- interface = self.ui_components.create_interface()
40
- interface.launch(
41
- share=share
42
  )
43
 
44
 
45
- def main():
46
  try:
47
- app = SmartHealApp()
48
- app.launch()
49
- except KeyboardInterrupt:
50
- logging.info("App interrupted by user.")
51
  except Exception:
52
- logging.error("App failed to start.")
53
- raise
54
 
55
- if __name__ == "__main__":
56
- main()
1
+ # smartheal_ai_processor.py
2
+ # Verbose, instrumented version - preserves public class/function names
3
+ # Turn on deep logging: export LOGLEVEL=DEBUG SMARTHEAL_DEBUG=1
4
 
5
  import os
6
+ import time
7
  import logging
8
+ from datetime import datetime
9
+ from typing import Optional, Dict, List, Tuple
 
10
 
11
+ # ---- Environment defaults ----
12
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
13
+ os.environ.setdefault("CUDA_VISIBLE_DEVICES", "")
14
+ LOGLEVEL = os.getenv("LOGLEVEL", "INFO").upper()
15
+ SMARTHEAL_DEBUG = os.getenv("SMARTHEAL_DEBUG", "0") == "1"
 
16
 
17
+ import cv2
18
+ import numpy as np
19
+ from PIL import Image
20
+ from PIL.ExifTags import TAGS
21
 
22
+ # --- Logging config ---
23
+ logging.basicConfig(
24
+ level=getattr(logging, LOGLEVEL, logging.INFO),
25
+ format="%(asctime)s - %(levelname)s - %(message)s",
26
+ )
27
+
28
+ def _log_kv(prefix: str, kv: Dict):
29
+ logging.debug(prefix + " | " + " | ".join(f"{k}={v}" for k, v in kv.items()))
30
+
31
+ # --- Optional Spaces GPU stub (harmless) ---
32
+ try:
33
+ import spaces as _spaces
34
+ @_spaces.GPU(enable_queue=False)
35
+ def smartheal_gpu_stub(ping: int = 0) -> str:
36
+ return "ready"
37
+ logging.info("Registered @spaces.GPU stub (enable_queue=False).")
38
+ except Exception:
39
+ pass
40
+
41
+ UPLOADS_DIR = "uploads"
42
+ os.makedirs(UPLOADS_DIR, exist_ok=True)
43
+
44
+ HF_TOKEN = os.getenv("HF_TOKEN", None)
45
+ YOLO_MODEL_PATH = "src/best.pt"
46
+ SEG_MODEL_PATH = "src/segmentation_model.h5" # optional
47
+ GUIDELINE_PDFS = ["src/eHealth in Wound Care.pdf", "src/IWGDF Guideline.pdf", "src/evaluation.pdf"]
48
+ DATASET_ID = "SmartHeal/wound-image-uploads"
49
+ DEFAULT_PX_PER_CM = 38.0
50
+ PX_PER_CM_MIN, PX_PER_CM_MAX = 5.0, 1200.0
51
+
52
+ # Segmentation preprocessing knobs
53
+ SEG_EXPECTS_RGB = os.getenv("SEG_EXPECTS_RGB", "1") == "1" # most TF models trained on RGB
54
+ SEG_NORM = os.getenv("SEG_NORM", "0to1") # "0to1" | "imagenet"
55
+ SEG_THRESH = float(os.getenv("SEG_THRESH", "0.5"))
56
+
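+ # Example overrides for the knobs above (hypothetical values; set before launch):
+ #   export SEG_EXPECTS_RGB=1 SEG_NORM=imagenet SEG_THRESH=0.4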
57
+ models_cache: Dict[str, object] = {}
58
+ knowledge_base_cache: Dict[str, object] = {}
59
+
60
+ # ---------- Lazy imports ----------
61
+ def _import_ultralytics():
62
+ from ultralytics import YOLO
63
+ return YOLO
64
+
65
+ def _import_tf_loader():
66
+ import tensorflow as tf
67
+ try:
68
+ tf.config.set_visible_devices([], "GPU") # keep TF on CPU
69
+ except Exception:
70
+ pass
71
+ from tensorflow.keras.models import load_model
72
+ return load_model
73
+
74
+ def _import_hf_cls():
75
+ from transformers import pipeline
76
+ return pipeline
77
+
78
+ def _import_embeddings():
79
+ from langchain_community.embeddings import HuggingFaceEmbeddings
80
+ return HuggingFaceEmbeddings
81
+
82
+ def _import_langchain_pdf():
83
+ from langchain_community.document_loaders import PyPDFLoader
84
+ return PyPDFLoader
85
+
86
+ def _import_langchain_faiss():
87
+ from langchain_community.vectorstores import FAISS
88
+ return FAISS
89
+
90
+ def _import_hf_hub():
91
+ from huggingface_hub import HfApi, HfFolder
92
+ return HfApi, HfFolder
93
 
94
+ # ---------- VLM (disabled by default) ----------
95
+ def generate_medgemma_report(
96
+ patient_info: str,
97
+ visual_results: Dict,
98
+ guideline_context: str,
99
+ image_pil: Image.Image,
100
+ max_new_tokens: Optional[int] = None,
101
+ ) -> str:
102
+ if os.getenv("SMARTHEAL_ENABLE_VLM", "0") != "1":
103
+ return "⚠️ VLM disabled"
104
+ try:
105
+ from transformers import pipeline
106
+ pipe = pipeline(
107
+ task="image-text-to-text",
108
+ model="google/medgemma-4b-it",
109
+ device_map=None,
110
+ token=HF_TOKEN,
111
+ trust_remote_code=True,
112
+ model_kwargs={"low_cpu_mem_usage": True},
113
+ )
114
+ prompt = (
115
+ "You are a medical AI assistant. Analyze this wound image and patient data.\n\n"
116
+ f"Patient: {patient_info}\n"
117
+ f"Wound: {visual_results.get('wound_type', 'Unknown')} - "
118
+ f"{visual_results.get('length_cm', 0)}Γ—{visual_results.get('breadth_cm', 0)} cm\n\n"
119
+ "Provide a structured report with:\n"
120
+ "1. Clinical Summary\n2. Treatment Recommendations\n3. Risk Assessment\n4. Monitoring Plan\n"
121
  )
122
+ messages = [{"role": "user", "content": [
123
+ {"type": "image", "image": image_pil},
124
+ {"type": "text", "text": prompt},
125
+ ]}]
126
+ out = pipe(text=messages, max_new_tokens=max_new_tokens or 600, do_sample=False)
127
+ if out and len(out) > 0:
128
+ try:
129
+ return out[0]["generated_text"][-1].get("content", "").strip() or "⚠️ Empty response"
130
+ except Exception:
131
+ return (out[0].get("generated_text", "") or "").strip() or "⚠️ Empty response"
132
+ return "⚠️ No output generated"
133
+ except Exception as e:
134
+ logging.error(f"❌ MedGemma generation error: {e}")
135
+ return "⚠️ VLM error"
136
+
137
+ # ---------- Initialize CPU models ----------
138
+ def load_yolo_model():
139
+ YOLO = _import_ultralytics()
140
+ return YOLO(YOLO_MODEL_PATH)
141
+
142
+ def load_segmentation_model():
143
+ load_model = _import_tf_loader()
144
+ return load_model(SEG_MODEL_PATH, compile=False)
145
+
146
+ def load_classification_pipeline():
147
+ pipe = _import_hf_cls()
148
+ return pipe("image-classification", model="Hemg/Wound-classification", token=HF_TOKEN, device="cpu")
149
+
150
+ def load_embedding_model():
151
+ Emb = _import_embeddings()
152
+ return Emb(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})
153
+
154
+ def initialize_cpu_models() -> None:
155
+ if HF_TOKEN:
156
+ try:
157
+ HfApi, HfFolder = _import_hf_hub()
158
+ HfFolder.save_token(HF_TOKEN)
159
+ logging.info("✅ HF token set")
160
+ except Exception as e:
161
+ logging.warning(f"HF token save failed: {e}")
162
 
163
+ if "det" not in models_cache:
164
+ try:
165
+ models_cache["det"] = load_yolo_model()
166
+ logging.info("✅ YOLO loaded (CPU)")
167
+ except Exception as e:
168
+ logging.error(f"YOLO load failed: {e}")
169
+
170
+ if "seg" not in models_cache:
171
+ try:
172
+ if os.path.exists(SEG_MODEL_PATH):
173
+ models_cache["seg"] = load_segmentation_model()
174
+ m = models_cache["seg"]
175
+ ishape = getattr(m, "input_shape", None)
176
+ oshape = getattr(m, "output_shape", None)
177
+ logging.info(f"βœ… Segmentation model loaded (CPU) | input_shape={ishape} output_shape={oshape}")
178
+ else:
179
+ models_cache["seg"] = None
180
+ logging.warning("Segmentation model file missing; skipping.")
181
+ except Exception as e:
182
+ models_cache["seg"] = None
183
+ logging.warning(f"Segmentation unavailable: {e}")
184
 
185
+ if "cls" not in models_cache:
186
+ try:
187
+ models_cache["cls"] = load_classification_pipeline()
188
+ logging.info("✅ Classifier loaded (CPU)")
189
+ except Exception as e:
190
+ models_cache["cls"] = None
191
+ logging.warning(f"Classifier unavailable: {e}")
192
+
193
+ if "embedding_model" not in models_cache:
194
+ try:
195
+ models_cache["embedding_model"] = load_embedding_model()
196
+ logging.info("✅ Embeddings loaded (CPU)")
197
+ except Exception as e:
198
+ models_cache["embedding_model"] = None
199
+ logging.warning(f"Embeddings unavailable: {e}")
200
+
201
+ def setup_knowledge_base() -> None:
202
+ if "vector_store" in knowledge_base_cache:
203
+ return
204
+ docs: List = []
205
+ try:
206
+ PyPDFLoader = _import_langchain_pdf()
207
+ for pdf in GUIDELINE_PDFS:
208
+ if os.path.exists(pdf):
209
+ try:
210
+ docs.extend(PyPDFLoader(pdf).load())
211
+ logging.info(f"Loaded PDF: {pdf}")
212
+ except Exception as e:
213
+ logging.warning(f"PDF load failed ({pdf}): {e}")
214
+ except Exception as e:
215
+ logging.warning(f"LangChain PDF loader unavailable: {e}")
216
+
217
+ if docs and models_cache.get("embedding_model"):
218
+ try:
219
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
220
+ FAISS = _import_langchain_faiss()
221
+ chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
222
+ knowledge_base_cache["vector_store"] = FAISS.from_documents(chunks, models_cache["embedding_model"])
223
+ logging.info(f"βœ… Knowledge base ready ({len(chunks)} chunks)")
224
+ except Exception as e:
225
+ knowledge_base_cache["vector_store"] = None
226
+ logging.warning(f"KB build failed: {e}")
227
+ else:
228
+ knowledge_base_cache["vector_store"] = None
229
+ logging.warning("KB disabled (no docs or embeddings).")
230
+
231
+ initialize_cpu_models()
232
+ setup_knowledge_base()
233
+
234
+ # ---------- Calibration helpers ----------
235
+ def _exif_to_dict(pil_img: Image.Image) -> Dict[str, object]:
236
+ out = {}
237
+ try:
238
+ exif = pil_img.getexif()
239
+ if not exif:
240
+ return out
241
+ for k, v in exif.items():
242
+ tag = TAGS.get(k, k)
243
+ out[tag] = v
244
+ except Exception:
245
+ pass
246
+ return out
247
+
248
+ def _to_float(val) -> Optional[float]:
249
  try:
250
+ if val is None:
251
+ return None
252
+ if isinstance(val, tuple) and len(val) == 2:
253
+ num, den = float(val[0]), (float(val[1]) if float(val[1]) != 0 else 1.0)
254
+ return num / den
255
+ return float(val)
256
  except Exception:
257
+ return None
258
+
259
+ def _estimate_sensor_width_mm(f_mm: Optional[float], f35: Optional[float]) -> Optional[float]:
260
+ if f_mm and f35 and f35 > 0:
261
+ return 36.0 * f_mm / f35
262
+ return None
263
+
264
+ def estimate_px_per_cm_from_exif(pil_img: Image.Image, default_px_per_cm: float = DEFAULT_PX_PER_CM) -> Tuple[float, Dict]:
265
+ meta = {"used": "default", "f_mm": None, "f35": None, "sensor_w_mm": None, "distance_m": None}
266
+ try:
267
+ exif = _exif_to_dict(pil_img)
268
+ f_mm = _to_float(exif.get("FocalLength"))
269
+ f35 = _to_float(exif.get("FocalLengthIn35mmFilm") or exif.get("FocalLengthIn35mm"))
270
+ subj_dist_m = _to_float(exif.get("SubjectDistance"))
271
+ sensor_w_mm = _estimate_sensor_width_mm(f_mm, f35)
272
+ meta.update({"f_mm": f_mm, "f35": f35, "sensor_w_mm": sensor_w_mm, "distance_m": subj_dist_m})
273
+
274
+ if f_mm and sensor_w_mm and subj_dist_m and subj_dist_m > 0:
275
+ w_px = pil_img.width
276
+ field_w_mm = sensor_w_mm * (subj_dist_m * 1000.0) / f_mm
277
+ field_w_cm = field_w_mm / 10.0
278
+ px_per_cm = w_px / max(field_w_cm, 1e-6)
279
+ px_per_cm = float(np.clip(px_per_cm, PX_PER_CM_MIN, PX_PER_CM_MAX))
280
+ meta["used"] = "exif"
281
+ return px_per_cm, meta
282
+ return float(default_px_per_cm), meta
283
+ except Exception:
284
+ return float(default_px_per_cm), meta
285
+
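+ # Worked example of the EXIF calibration above (hypothetical values): a 4000 px-wide
+ # photo with FocalLength f_mm = 4.2, FocalLengthIn35mmFilm f35 = 27 and
+ # SubjectDistance 0.30 m gives sensor_w_mm = 36 * 4.2 / 27 = 5.6,
+ # field_w_mm = 5.6 * 300 / 4.2 = 400 (i.e. 40 cm across the frame),
+ # so px_per_cm = 4000 / 40 = 100, well inside [PX_PER_CM_MIN, PX_PER_CM_MAX].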
286
+ # ---------- Segmentation helpers ----------
287
+ def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
288
+ mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
289
+ std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
290
+ return (arr.astype(np.float32) - mean) / std
291
+
292
+ def _preprocess_for_seg(bgr_roi: np.ndarray, target_hw: Tuple[int, int]) -> np.ndarray:
293
+ H, W = target_hw
294
+ resized = cv2.resize(bgr_roi, (W, H), interpolation=cv2.INTER_LINEAR)
295
+ if SEG_EXPECTS_RGB:
296
+ resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
297
+ if SEG_NORM.lower() == "imagenet":
298
+ x = _imagenet_norm(resized)
299
+ else:
300
+ x = resized.astype(np.float32) / 255.0
301
+ x = np.expand_dims(x, axis=0) # (1,H,W,3)
302
+ return x
303
+
304
+ def _to_prob(pred: np.ndarray) -> np.ndarray:
305
+ p = np.squeeze(pred)
306
+ pmin, pmax = float(p.min()), float(p.max())
307
+ if pmax > 1.0 or pmin < 0.0:
308
+ p = 1.0 / (1.0 + np.exp(-p))
309
+ return p.astype(np.float32)
310
+
311
+ # ---- Adaptive threshold + GrabCut grow ----
312
+ def _adaptive_prob_threshold(p: np.ndarray) -> float:
313
+ """
314
+ Choose a threshold that avoids tiny blobs while not swallowing skin.
315
+ Try Otsu and the 90th percentile, clamp to [0.25, 0.65], pick by area heuristic.
316
+ """
317
+ p01 = np.clip(p.astype(np.float32), 0, 1)
318
+ p255 = (p01 * 255).astype(np.uint8)
319
+
320
+ ret_otsu, _ = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
321
+ thr_otsu = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))
322
+ thr_pctl = float(np.clip(np.percentile(p01, 90), 0.25, 0.65))
323
+
324
+ def area_frac(thr: float) -> float:
325
+ return float((p01 >= thr).sum()) / float(p01.size)
326
+
327
+ af_otsu = area_frac(thr_otsu)
328
+ af_pctl = area_frac(thr_pctl)
329
+
330
+ def score(af: float) -> float:
331
+ target_low, target_high = 0.03, 0.10
332
+ if af < target_low: return abs(af - target_low) * 3.0
333
+ if af > target_high: return abs(af - target_high) * 1.5
334
+ return 0.0
335
+
336
+ return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
337
+
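+ # Illustration of the heuristic above (hypothetical numbers): if Otsu suggests 0.72
+ # it is clamped to 0.65 and may leave only a sliver above threshold, while a 90th
+ # percentile of 0.41 whose mask covers ~6% of the ROI falls inside the 3-10% target
+ # band, scores 0.0, and is therefore chosen.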
338
+ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
339
+ """Grow from a confident core into low-contrast margins."""
340
+ h, w = bgr.shape[:2]
341
+ gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
342
+ k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
343
+ seed_dil = cv2.dilate(seed01, k, iterations=1)
344
+ gc[seed_dil.astype(bool)] = cv2.GC_PR_FGD  # dilated ring: probable foreground
345
+ gc[seed01.astype(bool)] = cv2.GC_FGD  # confident core: definite foreground
346
+ gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
347
+ bgdModel = np.zeros((1, 65), np.float64)
348
+ fgdModel = np.zeros((1, 65), np.float64)
349
+ cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
350
+ return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
351
+
352
+ def _fill_holes(mask01: np.ndarray) -> np.ndarray:
353
+ h, w = mask01.shape[:2]
354
+ ff = np.zeros((h + 2, w + 2), np.uint8)
355
+ m = (mask01 * 255).astype(np.uint8).copy()
356
+ cv2.floodFill(m, ff, (0, 0), 255)
357
+ m_inv = cv2.bitwise_not(m)
358
+ out = ((mask01 * 255) | m_inv) // 255
359
+ return out.astype(np.uint8)
360
+
361
+ def _clean_mask(mask01: np.ndarray) -> np.ndarray:
362
+ """Open β†’ Close β†’ Fill holes β†’ Largest component (no dilation)."""
363
+ mask01 = (mask01 > 0).astype(np.uint8)
364
+ k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
365
+ k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
366
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
367
+ mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=1)
368
+ mask01 = _fill_holes(mask01)
369
+ # Keep largest component only
370
+ num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
371
+ if num > 1:
372
+ areas = stats[1:, cv2.CC_STAT_AREA]
373
+ if areas.size:
374
+ largest_idx = 1 + int(np.argmax(areas))
375
+ mask01 = (labels == largest_idx).astype(np.uint8)
376
+ return (mask01 > 0).astype(np.uint8)
377
+
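+ # Minimal self-check sketch for the cleanup chain above (synthetic mask; the helper
+ # name and sizes are illustrative and it is never called by the pipeline):
+ def _demo_clean_mask() -> None:
+ demo = np.zeros((64, 64), np.uint8)
+ cv2.circle(demo, (32, 32), 12, 1, -1)  # main blob...
+ cv2.circle(demo, (32, 32), 3, 0, -1)   # ...with a hole that _fill_holes should close
+ demo[2, 2] = 1                         # isolated speck that opening should remove
+ logging.debug(f"_demo_clean_mask: cleaned px={int(_clean_mask(demo).sum())}")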
378
+ # Global last debug dict (per-process)
379
+ _last_seg_debug: Dict[str, object] = {}
380
+
381
+ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
382
+ """
383
+ TF model → adaptive threshold on prob → GrabCut grow → cleanup.
384
+ Fallback: KMeans-Lab.
385
+ Returns (mask_uint8_0_255, debug_dict)
386
+ """
387
+ debug = {"used": None, "reason": None, "positive_fraction": 0.0,
388
+ "thr": None, "heatmap_path": None, "roi_seen_by_model": None}
389
+
390
+ seg_model = models_cache.get("seg", None)
391
+
392
+ # --- Model path ---
393
+ if seg_model is not None:
394
+ try:
395
+ ishape = getattr(seg_model, "input_shape", None)
396
+ if not ishape or len(ishape) < 4:
397
+ raise ValueError(f"Bad seg input_shape: {ishape}")
398
+ th, tw = int(ishape[1]), int(ishape[2])
399
+
400
+ x = _preprocess_for_seg(image_bgr, (th, tw))
401
+ roi_seen_path = None
402
+ if SMARTHEAL_DEBUG:
403
+ roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
404
+ cv2.imwrite(roi_seen_path, image_bgr)
405
+
406
+ pred = seg_model.predict(x, verbose=0)
407
+ if isinstance(pred, (list, tuple)): pred = pred[0]
408
+ p = _to_prob(pred)
409
+ p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
410
+
411
+ heatmap_path = None
412
+ if SMARTHEAL_DEBUG:
413
+ hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
414
+ heat = cv2.applyColorMap(hm, cv2.COLORMAP_JET)
415
+ heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
416
+ cv2.imwrite(heatmap_path, heat)
417
 
418
+ thr = _adaptive_prob_threshold(p)
419
+ core01 = (p >= thr).astype(np.uint8)
420
+ core_frac = float(core01.sum()) / float(core01.size)
421
+
422
+ if core_frac < 0.005:
423
+ thr2 = max(thr - 0.10, 0.15)
424
+ core01 = (p >= thr2).astype(np.uint8)
425
+ thr = thr2
426
+ core_frac = float(core01.sum()) / float(core01.size)
427
+
428
+ if core01.any():
429
+ gc01 = _grabcut_refine(image_bgr, core01, iters=3)
430
+ mask01 = _clean_mask(gc01)
431
+ else:
432
+ mask01 = np.zeros(core01.shape, np.uint8)
433
+
434
+ pos_frac = float(mask01.sum()) / float(mask01.size)
435
+ logging.info(f"SegModel USED | thr={float(thr):.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
436
+
437
+ debug.update({
438
+ "used": "tf_model",
439
+ "reason": "ok",
440
+ "positive_fraction": pos_frac,
441
+ "thr": float(thr),
442
+ "heatmap_path": heatmap_path,
443
+ "roi_seen_by_model": roi_seen_path
444
+ })
445
+ return (mask01 * 255).astype(np.uint8), debug
446
+
447
+ except Exception as e:
448
+ logging.warning(f"⚠️ Segmentation model failed β†’ fallback. Reason: {e}")
449
+ debug.update({"used": "fallback_kmeans", "reason": f"model_failed: {e}"})
450
+
451
+ # --- Fallback: KMeans in Lab (reddest cluster as wound) ---
452
+ Z = image_bgr.reshape((-1, 3)).astype(np.float32)
453
+ criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
454
+ _, labels, centers = cv2.kmeans(Z, 2, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
455
+ centers_u8 = centers.astype(np.uint8).reshape(1, 2, 3)
456
+ centers_lab = cv2.cvtColor(centers_u8, cv2.COLOR_BGR2LAB)[0]
457
+ wound_idx = int(np.argmax(centers_lab[:, 1])) # maximize a* (red)
458
+ mask01 = (labels.reshape(image_bgr.shape[:2]) == wound_idx).astype(np.uint8)
459
+ mask01 = _clean_mask(mask01)
460
+
461
+ pos_frac = float(mask01.sum()) / float(mask01.size)
462
+ logging.info(f"KMeans USED | final_frac={pos_frac:.4f}")
463
+
464
+ debug.update({
465
+ "used": "fallback_kmeans",
466
+ "reason": debug.get("reason") or "no_model",
467
+ "positive_fraction": pos_frac,
468
+ "thr": None
469
+ })
470
+ return (mask01 * 255).astype(np.uint8), debug
471
+
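+ # Usage sketch for segment_wound (hypothetical ROI path; defined for illustration
+ # only and never called by the pipeline):
+ def _demo_segment_wound(roi_path: str = "uploads/analysis/sample_roi.png") -> None:
+ roi_bgr = cv2.imread(roi_path)
+ if roi_bgr is None:
+ logging.warning("demo ROI not found; skipping")
+ return
+ mask255, dbg = segment_wound(roi_bgr, ts="demo", out_dir=UPLOADS_DIR)
+ logging.info(f"demo segmentation via {dbg['used']} | fraction={dbg['positive_fraction']:.4f}")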
472
+ # ---------- Measurement + overlay helpers ----------
473
+ def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
474
+ num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
475
+ if num <= 1:
476
+ return binary01.astype(np.uint8)
477
+ areas = stats[1:, cv2.CC_STAT_AREA]
478
+ if areas.size == 0 or areas.max() < min_area_px:
479
+ return binary01.astype(np.uint8)
480
+ largest_idx = 1 + int(np.argmax(areas))
481
+ return (labels == largest_idx).astype(np.uint8)
482
+
483
+ def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
484
+ contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
485
+ if not contours:
486
+ return 0.0, 0.0, (None, None)
487
+ cnt = max(contours, key=cv2.contourArea)
488
+ rect = cv2.minAreaRect(cnt)
489
+ (w_px, h_px) = rect[1]
490
+ length_px, breadth_px = (max(w_px, h_px), min(w_px, h_px))
491
+ length_cm = round(length_px / max(px_per_cm, 1e-6), 2)
492
+ breadth_cm = round(breadth_px / max(px_per_cm, 1e-6), 2)
493
+ box = cv2.boxPoints(rect).astype(int)
494
+ return length_cm, breadth_cm, (box, rect[0])
495
+
496
+ def area_cm2_from_contour(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, Optional[np.ndarray]]:
497
+ """Area from largest polygon (sub-pixel); returns (area_cm2, contour)."""
498
+ m = (mask01 > 0).astype(np.uint8)
499
+ contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
500
+ if not contours:
501
+ return 0.0, None
502
+ cnt = max(contours, key=cv2.contourArea)
503
+ poly_area_px2 = float(cv2.contourArea(cnt))
504
+ area_cm2 = round(poly_area_px2 / (max(px_per_cm, 1e-6) ** 2), 2)
505
+ return area_cm2, cnt
506
+
507
+ def clamp_area_with_minrect(cnt: np.ndarray, px_per_cm: float, area_cm2_poly: float) -> float:
508
+ rect = cv2.minAreaRect(cnt)
509
+ (w_px, h_px) = rect[1]
510
+ rect_area_px2 = float(max(w_px, 0.0) * max(h_px, 0.0))
511
+ rect_area_cm2 = rect_area_px2 / (max(px_per_cm, 1e-6) ** 2)
512
+ return round(min(area_cm2_poly, rect_area_cm2 * 1.05), 2)
513
+
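+ # Worked example for the measurement helpers above (hypothetical numbers): a min-area
+ # rect of 120 x 60 px at 40 px/cm measures 3.00 cm x 1.50 cm; a contour of 6400 px^2
+ # gives 6400 / 40^2 = 4.00 cm^2, under the clamp ceiling of (120*60 / 40^2) * 1.05
+ # = 4.73 cm^2, so the polygon estimate is kept.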
514
+ def draw_measurement_overlay(
515
+ base_bgr: np.ndarray,
516
+ mask01: np.ndarray,
517
+ rect_box: np.ndarray,
518
+ length_cm: float,
519
+ breadth_cm: float,
520
+ thickness: int = 2
521
+ ) -> np.ndarray:
522
+ """
523
+ 1) Strong red mask overlay + white contour
524
+ 2) Min-area rectangle
525
+ 3) Double-headed arrows labeled Length/Width
526
+ """
527
+ overlay = base_bgr.copy()
528
+
529
+ # Mask tint
530
+ mask255 = (mask01 * 255).astype(np.uint8)
531
+ mask3 = cv2.merge([mask255, mask255, mask255])
532
+ red = np.zeros_like(overlay); red[:] = (0, 0, 255)
533
+ alpha = 0.55
534
+ tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
535
+ overlay = np.where(mask3 > 0, tinted, overlay)
536
+
537
+ # Contour
538
+ cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
539
+ if cnts:
540
+ cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
541
+
542
+ if rect_box is not None:
543
+ cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
544
+ pts = rect_box.reshape(-1, 2)
545
+
546
+ def midpoint(a, b): return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
547
+ e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
548
+ long_edge_idx = int(np.argmax(e))
549
+ mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
550
+ long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
551
+ short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
552
+
553
+ def draw_double_arrow(img, p1, p2):
554
+ cv2.arrowedLine(img, p1, p2, (0, 0, 0), thickness + 2, tipLength=0.05)
555
+ cv2.arrowedLine(img, p2, p1, (0, 0, 0), thickness + 2, tipLength=0.05)
556
+ cv2.arrowedLine(img, p1, p2, (255, 255, 255), thickness, tipLength=0.05)
557
+ cv2.arrowedLine(img, p2, p1, (255, 255, 255), thickness, tipLength=0.05)
558
+
559
+ def put_label(text, anchor):
560
+ org = (anchor[0] + 6, anchor[1] - 6)
561
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
562
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
563
+
564
+ draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
565
+ draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
566
+ put_label(f"Length: {length_cm:.2f} cm", mids[long_pair[0]])
567
+ put_label(f"Width: {breadth_cm:.2f} cm", mids[short_pair[0]])
568
+
569
+ return overlay
570
+
571
+ # ---------- AI PROCESSOR ----------
572
+ class AIProcessor:
573
+ def __init__(self):
574
+ self.models_cache = models_cache
575
+ self.knowledge_base_cache = knowledge_base_cache
576
+ self.uploads_dir = UPLOADS_DIR
577
+ self.dataset_id = DATASET_ID
578
+ self.hf_token = HF_TOKEN
579
+
580
+ def _ensure_analysis_dir(self) -> str:
581
+ out_dir = os.path.join(self.uploads_dir, "analysis")
582
+ os.makedirs(out_dir, exist_ok=True)
583
+ return out_dir
584
+
585
+ def perform_visual_analysis(self, image_pil: Image.Image) -> Dict:
586
+ """
587
+ YOLO detect → crop ROI → segment_wound(ROI) → clean mask →
588
+ minAreaRect measurement (cm) using EXIF px/cm → save outputs.
589
+ """
590
+ try:
591
+ px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
592
+ # Guardrails for calibration to avoid huge area blow-ups
593
+ px_per_cm = float(np.clip(px_per_cm, 20.0, 350.0))
594
+ if (exif_meta or {}).get("used") != "exif":
595
+ logging.warning(f"Calibration fallback used: px_per_cm={px_per_cm:.2f} (default). Prefer ruler/Aruco for accuracy.")
596
+
597
+ image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)
598
+
599
+ # --- Detection ---
600
+ det_model = self.models_cache.get("det")
601
+ if det_model is None:
602
+ raise RuntimeError("YOLO model not loaded")
603
+ results = det_model.predict(image_cv, verbose=False, device="cpu")
604
+ if (not results) or (not getattr(results[0], "boxes", None)) or (len(results[0].boxes) == 0):
605
+ try:
606
+ import gradio as gr
607
+ raise gr.Error("No wound could be detected.")
608
+ except ImportError:  # gradio unavailable; raise a plain error instead
609
+ raise RuntimeError("No wound could be detected.")
610
+
611
+ box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
612
+ x1, y1, x2, y2 = [int(v) for v in box]
613
+ x1, y1 = max(0, x1), max(0, y1)
614
+ x2, y2 = min(image_cv.shape[1], x2), min(image_cv.shape[0], y2)
615
+ roi = image_cv[y1:y2, x1:x2].copy()
616
+ if roi.size == 0:
617
+ try:
618
+ import gradio as gr
619
+ raise gr.Error("Detected ROI is empty.")
620
+ except ImportError:  # gradio unavailable; raise a plain error instead
621
+ raise RuntimeError("Detected ROI is empty.")
622
+
623
+ out_dir = self._ensure_analysis_dir()
624
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
625
+
626
+ # --- Segmentation (model-first + KMeans fallback) ---
627
+ mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
628
+ mask01 = (mask_u8_255 > 127).astype(np.uint8)
629
+
630
+ if mask01.any():
631
+ mask01 = _clean_mask(mask01)
632
+ logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
633
+
634
+ # --- Measurement (accurate & conservative) ---
635
+ if mask01.any():
636
+ length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
637
+ area_poly_cm2, largest_cnt = area_cm2_from_contour(mask01, px_per_cm)
638
+ if largest_cnt is not None:
639
+ surface_area_cm2 = clamp_area_with_minrect(largest_cnt, px_per_cm, area_poly_cm2)
640
+ else:
641
+ surface_area_cm2 = area_poly_cm2
642
+
643
+ anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
644
+ segmentation_empty = False
645
+ else:
646
+ # Fallback if seg failed: use ROI dimensions
647
+ h_px = max(0, y2 - y1); w_px = max(0, x2 - x1)
648
+ length_cm = round(max(h_px, w_px) / px_per_cm, 2)
649
+ breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
650
+ surface_area_cm2 = round((h_px * w_px) / (px_per_cm ** 2), 2)
651
+ anno_roi = roi.copy()
652
+ cv2.rectangle(anno_roi, (2, 2), (anno_roi.shape[1]-3, anno_roi.shape[0]-3), (0, 0, 255), 3)
653
+ cv2.line(anno_roi, (0, 0), (anno_roi.shape[1]-1, anno_roi.shape[0]-1), (0, 0, 255), 2)
654
+ cv2.line(anno_roi, (anno_roi.shape[1]-1, 0), (0, anno_roi.shape[0]-1), (0, 0, 255), 2)
655
+ box_pts = None
656
+ segmentation_empty = True
657
+
658
+ # --- Save visualizations ---
659
+ original_path = os.path.join(out_dir, f"original_{ts}.png")
660
+ cv2.imwrite(original_path, image_cv)
661
+
662
+ det_vis = image_cv.copy()
663
+ cv2.rectangle(det_vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
664
+ detection_path = os.path.join(out_dir, f"detection_{ts}.png")
665
+ cv2.imwrite(detection_path, det_vis)
666
+
667
+ roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
668
+ cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
669
+
670
+ # ROI overlay (mask tint + contour, without arrows)
671
+ mask255 = (mask01 * 255).astype(np.uint8)
672
+ mask3 = cv2.merge([mask255, mask255, mask255])
673
+ red = np.zeros_like(roi); red[:] = (0, 0, 255)
674
+ alpha = 0.55
675
+ tinted = cv2.addWeighted(roi, 1 - alpha, red, alpha, 0)
676
+ if mask255.any():
677
+ roi_overlay = np.where(mask3 > 0, tinted, roi)
678
+ cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
679
+ cv2.drawContours(roi_overlay, cnts, -1, (255, 255, 255), 2)
680
+ else:
681
+ roi_overlay = anno_roi
682
+
683
+ seg_full = image_cv.copy()
684
+ seg_full[y1:y2, x1:x2] = roi_overlay
685
+ segmentation_path = os.path.join(out_dir, f"segmentation_{ts}.png")
686
+ cv2.imwrite(segmentation_path, seg_full)
687
+
688
+ segmentation_roi_path = os.path.join(out_dir, f"segmentation_roi_{ts}.png")
689
+ cv2.imwrite(segmentation_roi_path, roi_overlay)
690
+
691
+ # Annotated (mask + arrows + labels) in full-frame
692
+ anno_full = image_cv.copy()
693
+ anno_full[y1:y2, x1:x2] = anno_roi
694
+ annotated_seg_path = os.path.join(out_dir, f"segmentation_annotated_{ts}.png")
695
+ cv2.imwrite(annotated_seg_path, anno_full)
696
+
697
+ # --- Optional classification ---
698
+ wound_type = "Unknown"
699
+ cls_pipe = self.models_cache.get("cls")
700
+ if cls_pipe is not None:
701
+ try:
702
+ preds = cls_pipe(Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)))
703
+ if preds:
704
+ wound_type = max(preds, key=lambda x: x.get("score", 0)).get("label", "Unknown")
705
+ except Exception as e:
706
+ logging.warning(f"Classification failed: {e}")
707
+
708
+ # Log end-of-seg summary
709
+ seg_summary = {
710
+ "seg_used": seg_debug.get("used"),
711
+ "seg_reason": seg_debug.get("reason"),
712
+ "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
713
+ "threshold": seg_debug.get("thr"),
714
+ "segmentation_empty": segmentation_empty,
715
+ "exif_px_per_cm": round(px_per_cm, 3),
716
+ }
717
+ _log_kv("SEG_SUMMARY", seg_summary)
718
+
719
+ return {
720
+ "wound_type": wound_type,
721
+ "length_cm": length_cm,
722
+ "breadth_cm": breadth_cm,
723
+ "surface_area_cm2": surface_area_cm2,
724
+ "px_per_cm": round(px_per_cm, 2),
725
+ "calibration_meta": exif_meta,
726
+ "detection_confidence": float(results[0].boxes.conf[0].cpu().item())
727
+ if getattr(results[0].boxes, "conf", None) is not None else 0.0,
728
+ "detection_image_path": detection_path,
729
+ "segmentation_image_path": annotated_seg_path,
730
+ "segmentation_annotated_path": annotated_seg_path,
731
+ "segmentation_roi_path": segmentation_roi_path,
732
+ "roi_mask_path": roi_mask_path,
733
+ "segmentation_empty": segmentation_empty,
734
+ "segmentation_debug": seg_debug,
735
+ "original_image_path": original_path,
736
+ }
737
+ except Exception as e:
738
+ logging.error(f"Visual analysis failed: {e}", exc_info=True)
739
+ raise
740
+
741
+ # ---------- Knowledge base + reporting ----------
742
+ def query_guidelines(self, query: str) -> str:
743
+ try:
744
+ vs = self.knowledge_base_cache.get("vector_store")
745
+ if not vs:
746
+ return "Knowledge base is not available."
747
+ try:
748
+ retriever = vs.as_retriever(search_kwargs={"k": 5})
749
+ docs = retriever.get_relevant_documents(query)
750
+ except Exception:
751
+ retriever = vs.as_retriever(search_kwargs={"k": 5})
752
+ docs = retriever.invoke(query)
753
+ lines: List[str] = []
754
+ for d in docs:
755
+ src = (d.metadata or {}).get("source", "N/A")
756
+ txt = (d.page_content or "")[:300]
757
+ lines.append(f"Source: {src}\nContent: {txt}...")
758
+ return "\n\n".join(lines) if lines else "No relevant guideline snippets found."
759
+ except Exception as e:
760
+ logging.warning(f"Guidelines query failed: {e}")
761
+ return f"Guidelines query failed: {str(e)}"
762
+
763
+ def _generate_fallback_report(self, patient_info: str, visual_results: Dict, guideline_context: str) -> str:
764
+ return f"""# 🩺 SmartHeal AI - Comprehensive Wound Analysis Report
765
+ ## 📋 Patient Information
766
+ {patient_info}
767
+ ## 🔍 Visual Analysis Results
768
+ - **Wound Type**: {visual_results.get('wound_type', 'Unknown')}
769
+ - **Dimensions**: {visual_results.get('length_cm', 0)} cm × {visual_results.get('breadth_cm', 0)} cm
770
+ - **Surface Area**: {visual_results.get('surface_area_cm2', 0)} cmΒ²
771
+ - **Detection Confidence**: {visual_results.get('detection_confidence', 0):.1%}
772
+ - **Calibration**: {visual_results.get('px_per_cm','?')} px/cm ({(visual_results.get('calibration_meta') or {}).get('used','default')})
773
+ ## 📊 Analysis Images
774
+ - **Original**: {visual_results.get('original_image_path', 'N/A')}
775
+ - **Detection**: {visual_results.get('detection_image_path', 'N/A')}
776
+ - **Segmentation**: {visual_results.get('segmentation_image_path', 'N/A')}
777
+ - **Annotated**: {visual_results.get('segmentation_annotated_path', 'N/A')}
778
+ ## 🎯 Clinical Summary
779
+ Automated analysis provides quantitative measurements; verify via clinical examination.
780
+ ## 💊 Recommendations
781
+ - Cleanse wound gently; select dressing per exudate/infection risk
782
+ - Debride necrotic tissue if indicated (clinical decision)
783
+ - Document with serial photos and measurements
784
+ ## 📅 Monitoring
785
+ - Daily in week 1, then every 2–3 days (or as indicated)
786
+ - Weekly progress review
787
+ ## 📚 Guideline Context
788
+ {(guideline_context or '')[:800]}{"..." if guideline_context and len(guideline_context) > 800 else ''}
789
+ **Disclaimer:** Automated, for decision support only. Verify clinically.
790
+ """
791
+
792
+ def generate_final_report(
793
+ self,
794
+ patient_info: str,
795
+ visual_results: Dict,
796
+ guideline_context: str,
797
+ image_pil: Image.Image,
798
+ max_new_tokens: Optional[int] = None,
799
+ ) -> str:
800
+ try:
801
+ report = generate_medgemma_report(
802
+ patient_info, visual_results, guideline_context, image_pil, max_new_tokens
803
+ )
804
+ if report and report.strip() and not report.startswith(("⚠️", "❌")):
805
+ return report
806
+ logging.warning("MedGemma unavailable/invalid; using fallback.")
807
+ return self._generate_fallback_report(patient_info, visual_results, guideline_context)
808
+ except Exception as e:
809
+ logging.error(f"Report generation failed: {e}")
810
+ return self._generate_fallback_report(patient_info, visual_results, guideline_context)
811
+
812
+ def save_and_commit_image(self, image_pil: Image.Image) -> str:
813
+ try:
814
+ os.makedirs(self.uploads_dir, exist_ok=True)
815
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
816
+ filename = f"{ts}.png"
817
+ path = os.path.join(self.uploads_dir, filename)
818
+ image_pil.convert("RGB").save(path)
819
+ logging.info(f"βœ… Image saved locally: {path}")
820
+
821
+ if HF_TOKEN and DATASET_ID:
822
+ try:
823
+ HfApi, HfFolder = _import_hf_hub()
824
+ HfFolder.save_token(HF_TOKEN)
825
+ api = HfApi()
826
+ api.upload_file(
827
+ path_or_fileobj=path,
828
+ path_in_repo=f"images/{filename}",
829
+ repo_id=DATASET_ID,
830
+ repo_type="dataset",
831
+ token=HF_TOKEN,
832
+ commit_message=f"Upload wound image: {filename}",
833
+ )
834
+ logging.info("✅ Image committed to HF dataset")
835
+ except Exception as e:
836
+ logging.warning(f"HF upload failed: {e}")
837
+
838
+ return path
839
+ except Exception as e:
840
+ logging.error(f"Failed to save/commit image: {e}")
841
+ return ""
842
+
843
+ def full_analysis_pipeline(self, image_pil: Image.Image, questionnaire_data: Dict) -> Dict:
844
+ try:
845
+ saved_path = self.save_and_commit_image(image_pil)
846
+ visual_results = self.perform_visual_analysis(image_pil)
847
+
848
+ pi = questionnaire_data or {}
849
+ patient_info = (
850
+ f"Age: {pi.get('age','N/A')}, "
851
+ f"Diabetic: {pi.get('diabetic','N/A')}, "
852
+ f"Allergies: {pi.get('allergies','N/A')}, "
853
+ f"Date of Wound: {pi.get('date_of_injury','N/A')}, "
854
+ f"Professional Care: {pi.get('professional_care','N/A')}, "
855
+ f"Oozing/Bleeding: {pi.get('oozing_bleeding','N/A')}, "
856
+ f"Infection: {pi.get('infection','N/A')}, "
857
+ f"Moisture: {pi.get('moisture','N/A')}"
858
+ )
859
+
860
+ query = (
861
+ f"best practices for managing a {visual_results.get('wound_type','Unknown')} "
862
+ f"with moisture '{pi.get('moisture','unknown')}' and infection '{pi.get('infection','unknown')}' "
863
+ f"in a diabetic status '{pi.get('diabetic','unknown')}'"
864
+ )
865
+ guideline_context = self.query_guidelines(query)
866
+
867
+ report = self.generate_final_report(patient_info, visual_results, guideline_context, image_pil)
868
+
869
+ return {
870
+ "success": True,
871
+ "visual_analysis": visual_results,
872
+ "report": report,
873
+ "saved_image_path": saved_path,
874
+ "guideline_context": (guideline_context or "")[:500] + (
875
+ "..." if guideline_context and len(guideline_context) > 500 else ""
876
+ ),
877
+ }
878
+ except Exception as e:
879
+ logging.error(f"Pipeline error: {e}")
880
+ return {
881
+ "success": False,
882
+ "error": str(e),
883
+ "visual_analysis": {},
884
+ "report": f"Analysis failed: {str(e)}",
885
+ "saved_image_path": None,
886
+ "guideline_context": "",
887
+ }
888
+
889
+ def analyze_wound(self, image, questionnaire_data: Dict) -> Dict:
890
+ try:
891
+ if isinstance(image, str):
892
+ if not os.path.exists(image):
893
+ raise ValueError(f"Image file not found: {image}")
894
+ image_pil = Image.open(image)
895
+ elif isinstance(image, Image.Image):
896
+ image_pil = image
897
+ elif isinstance(image, np.ndarray):
898
+ image_pil = Image.fromarray(image)
899
+ else:
900
+ raise ValueError(f"Unsupported image type: {type(image)}")
901
+
902
+ return self.full_analysis_pipeline(image_pil, questionnaire_data or {})
903
+ except Exception as e:
904
+ logging.error(f"Wound analysis error: {e}")
905
+ return {
906
+ "success": False,
907
+ "error": str(e),
908
+ "visual_analysis": {},
909
+ "report": f"Analysis initialization failed: {str(e)}",
910
+ "saved_image_path": None,
911
+ "guideline_context": "",
912
+ }
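+ # End-to-end usage sketch (hypothetical file name and questionnaire values; the
+ # questionnaire keys mirror those read in full_analysis_pipeline):
+ if __name__ == "__main__":
+ _processor = AIProcessor()
+ _result = _processor.analyze_wound(
+ "uploads/example_wound.png",
+ {"age": 62, "diabetic": "yes", "allergies": "none", "date_of_injury": "2 weeks ago",
+ "professional_care": "no", "oozing_bleeding": "mild", "infection": "no", "moisture": "moderate"},
+ )
+ print(_result.get("success"), (_result.get("report") or "")[:200])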