malavikapradeep2001 committed on
Commit
05de78e
·
unverified ·
1 Parent(s): e38f6f9

Update backend/app.py

Browse files
Files changed (1) hide show
  1. backend/app.py +858 -100
backend/app.py CHANGED
@@ -1,7 +1,6 @@
1
  import os
2
  import shutil
3
 
4
-
5
  for d in ["/tmp/huggingface", "/tmp/Ultralytics", "/tmp/matplotlib", "/tmp/torch", "/root/.cache"]:
6
  shutil.rmtree(d, ignore_errors=True)
7
 
@@ -11,75 +10,263 @@ os.environ["TORCH_HOME"] = "/tmp/torch"
11
  os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
12
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
13
 
14
-
15
- from huggingface_hub import login
16
-
17
-
18
-
19
- hf_token = os.getenv("HF_TOKEN")
20
- if hf_token:
21
- login(token=hf_token)
22
-
 
 
 
23
  from fastapi import FastAPI, File, UploadFile, Form
24
  from fastapi.middleware.cors import CORSMiddleware
25
  from fastapi.responses import JSONResponse, FileResponse
 
 
26
  from fastapi.staticfiles import StaticFiles
27
- from ultralytics import YOLO
28
- from io import BytesIO
29
- from PIL import Image
30
  import uvicorn
31
- import json, os, uuid, numpy as np, torch, cv2, joblib, io, tensorflow as tf
32
- import torch.nn as nn
33
- import torchvision.transforms as transforms
34
- import torchvision.models as models
 
 
 
 
 
 
 
35
  from sklearn.preprocessing import MinMaxScaler
36
  from model import MWT as create_model
37
  from augmentations import Augmentations
38
- from model_histo import BreastCancerClassifier # TensorFlow model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
 
40
 
 
41
 
42
  # =====================================================
43
- # App setup
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  # =====================================================
45
- app = FastAPI(title="Unified Cervical & Breast Cancer Analysis API")
 
 
 
 
 
46
 
47
  app.add_middleware(
48
  CORSMiddleware,
49
- allow_origins=["*"],
50
  allow_credentials=True,
51
  allow_methods=["*"],
52
  allow_headers=["*"],
 
53
  )
54
 
55
- OUTPUT_DIR = "/tmp/outputs"
56
  os.makedirs(OUTPUT_DIR, exist_ok=True)
 
 
 
 
 
57
  app.mount("/outputs", StaticFiles(directory=OUTPUT_DIR), name="outputs")
58
 
59
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
60
 
61
  # =====================================================
62
- # Model 1: YOLO (Colposcopy Detection)
 
 
63
  # =====================================================
 
64
  print("🔹 Loading YOLO model...")
65
  yolo_model = YOLO("best2.pt")
66
 
67
- # =====================================================
68
- # Model 2: MWT Classifier
69
- # =====================================================
70
  print("🔹 Loading MWT model...")
71
  mwt_model = create_model(num_classes=2).to(device)
72
  mwt_model.load_state_dict(torch.load("MWTclass2.pth", map_location=device))
73
  mwt_model.eval()
74
- mwt_class_names = ['Negative', 'Positive']
75
 
76
- # =====================================================
77
- # Model 3: CIN Classifier
78
- # =====================================================
79
  print("🔹 Loading CIN model...")
80
- clf = joblib.load("logistic_regression_model.pkl")
 
 
 
 
 
81
  yolo_colposcopy = YOLO("yolo_colposcopy.pt")
82
 
 
 
 
 
 
 
83
  def build_resnet(model_name="resnet50"):
84
  if model_name == "resnet50":
85
  model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
@@ -92,7 +279,6 @@ def build_resnet(model_name="resnet50"):
92
  nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool),
93
  model.layer1, model.layer2, model.layer3, model.layer4,
94
  )
95
-
96
  gap = nn.AdaptiveAvgPool2d((1, 1))
97
  gmp = nn.AdaptiveMaxPool2d((1, 1))
98
  resnet50_blocks = build_resnet("resnet50")
@@ -100,29 +286,14 @@ resnet101_blocks = build_resnet("resnet101")
100
  resnet152_blocks = build_resnet("resnet152")
101
 
102
  transform = transforms.Compose([
103
- transforms.ToPILImage(),
104
- transforms.Resize((224, 224)),
105
- transforms.ToTensor(),
106
- transforms.Normalize(mean=[0.485, 0.456, 0.406],
107
- std=[0.229, 0.224, 0.225]),
108
  ])
109
 
110
- # =====================================================
111
- # Model 4: Histopathology Classifier (TensorFlow)
112
- # =====================================================
113
- print("🔹 Loading Breast Cancer Histopathology model...")
114
- classifier = BreastCancerClassifier(fine_tune=False)
115
- if not classifier.authenticate_huggingface():
116
- raise RuntimeError("HuggingFace authentication failed.")
117
- if not classifier.load_path_foundation():
118
- raise RuntimeError("Failed to load Path Foundation model.")
119
- model_path = "histopathology_trained_model.keras"
120
- classifier.model = tf.keras.models.load_model(model_path)
121
- print(f"✅ Loaded model from {model_path}")
122
 
123
- # =====================================================
124
- # Helper functions
125
- # =====================================================
126
  def preprocess_for_mwt(image_np):
127
  img = cv2.resize(image_np, (224, 224))
128
  img = Augmentations.Normalization((0, 1))(img)
@@ -145,44 +316,147 @@ def extract_cbf_features(blocks, img_t):
145
  p3 = gap(f3).view(-1)
146
  p4 = gap(f4).view(-1)
147
  p5 = gap(f5).view(-1)
148
- cbf_feature = torch.cat([p1, p2, p3, p4, p5], dim=0)
149
- return cbf_feature.cpu().numpy()
150
-
151
- def predict_histopathology(image: Image.Image):
152
- if image.mode != "RGB":
153
- image = image.convert("RGB")
154
- image = image.resize((224, 224))
155
- img_array = np.expand_dims(np.array(image).astype("float32") / 255.0, axis=0)
156
- embeddings = classifier.extract_embeddings(img_array)
157
- prediction_proba = classifier.model.predict(embeddings, verbose=0)[0]
158
- predicted_class = int(np.argmax(prediction_proba))
159
- class_names = ["Benign", "Malignant"]
160
- return {
161
- "model_used": "Breast Cancer Histopathology Classifier",
162
- "prediction": class_names[predicted_class],
163
- "confidence": float(np.max(prediction_proba))
164
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
  # =====================================================
167
- # Main endpoint
 
 
168
  # =====================================================
 
 
169
  @app.post("/predict/")
170
  async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  contents = await file.read()
172
- image = Image.open(BytesIO(contents)).convert("RGB")
173
- image_np = np.array(image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
 
175
  if model_name == "yolo":
176
  results = yolo_model(image)
177
  detections_json = results[0].to_json()
178
  detections = json.loads(detections_json)
 
 
 
 
 
 
 
179
  output_filename = f"detected_{uuid.uuid4().hex[:8]}.jpg"
180
- output_path = os.path.join(OUTPUT_DIR, output_filename)
181
  results[0].save(filename=output_path)
 
182
  return {
183
  "model_used": "YOLO Detection",
184
  "detections": detections,
185
- "annotated_image_url": f"/outputs/{output_filename}"
 
 
 
 
 
 
186
  }
187
 
188
  elif model_name == "mwt":
@@ -191,15 +465,35 @@ async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
191
  output = mwt_model(tensor.to(device)).cpu()
192
  probs = torch.softmax(output, dim=1)[0]
193
  confidences = {mwt_class_names[i]: float(probs[i]) for i in range(2)}
194
- predicted_label = mwt_class_names[torch.argmax(probs)]
195
- return {"model_used": "MWT Classifier", "prediction": predicted_label, "confidence": confidences}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
 
197
  elif model_name == "cin":
 
 
 
 
 
198
  nparr = np.frombuffer(contents, np.uint8)
199
  img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
200
  results = yolo_colposcopy.predict(source=img, conf=0.7, save=False, verbose=False)
201
  if len(results[0].boxes) == 0:
202
  return {"error": "No cervix detected"}
 
203
  x1, y1, x2, y2 = map(int, results[0].boxes.xyxy[0].cpu().numpy())
204
  crop = img[y1:y2, x1:x2]
205
  crop = cv2.resize(crop, (224, 224))
@@ -211,52 +505,516 @@ async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
211
  X_scaled = MinMaxScaler().fit_transform(features)
212
  pred = clf.predict(X_scaled)[0]
213
  proba = clf.predict_proba(X_scaled)[0]
214
- classes = ["CIN1", "CIN2", "CIN3"]
 
215
  predicted_label = classes[pred]
216
- predicted_confidence = float(proba[pred])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
217
  return {
218
  "model_used": "CIN Classifier",
219
- "prediction": predicted_label,
220
- "confidence": predicted_confidence
 
 
 
 
 
221
  }
222
-
223
-
224
  elif model_name == "histopathology":
225
- result = predict_histopathology(image)
226
- return result
 
227
 
228
  else:
229
  return JSONResponse(content={"error": "Invalid model name"}, status_code=400)
230
 
231
- @app.on_event("startup")
232
- async def cleanup_on_startup():
233
- for d in ["/tmp/huggingface", "/tmp/Ultralytics", "/tmp/matplotlib", "/tmp/torch", "/root/.cache"]:
234
- shutil.rmtree(d, ignore_errors=True)
235
- print("✅ Cleaned caches on startup")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
 
238
@app.get("/models")
def get_models():
    """List the model identifiers accepted by the /predict/ endpoint."""
    available = ["yolo", "mwt", "cin", "histopathology"]
    return {"available_models": available}
241
 
242
-
243
@app.get("/health")
def health():
    """Liveness probe: confirm the API process is up and responding."""
    return {"message": "Unified Cervical & Breast Cancer API is running!"}
 
 
 
 
246
 
247
- # After other app.mount()s
248
- app.mount("/outputs", StaticFiles(directory=OUTPUT_DIR), name="outputs")
249
- app.mount("/assets", StaticFiles(directory="frontend/dist/assets"), name="assets")
250
- from fastapi.staticfiles import StaticFiles
251
 
252
- app.mount("/", StaticFiles(directory="frontend/dist", html=True), name="static")
 
 
253
 
 
 
 
 
254
 
255
@app.get("/")
async def serve_frontend():
    """Serve the built frontend's index.html at the site root."""
    return FileResponse(os.path.join("frontend", "dist", "index.html"))
 
 
259
 
260
  if __name__ == "__main__":
261
- uvicorn.run(app, host="0.0.0.0", port=7860)
262
-
 
 
1
  import os
2
  import shutil
3
 
 
4
  for d in ["/tmp/huggingface", "/tmp/Ultralytics", "/tmp/matplotlib", "/tmp/torch", "/root/.cache"]:
5
  shutil.rmtree(d, ignore_errors=True)
6
 
 
10
  os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
11
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
12
 
13
+ import json
14
+ import uuid
15
+ import datetime
16
+ import numpy as np
17
+ import torch
18
+ import cv2
19
+ import joblib
20
+ import torch.nn as nn
21
+ import torchvision.transforms as transforms
22
+ import torchvision.models as models
23
+ from io import BytesIO
24
+ from PIL import Image as PILImage
25
  from fastapi import FastAPI, File, UploadFile, Form
26
  from fastapi.middleware.cors import CORSMiddleware
27
  from fastapi.responses import JSONResponse, FileResponse
28
+ import tensorflow as tf
29
+ from model_histo import BreastCancerClassifier
30
  from fastapi.staticfiles import StaticFiles
 
 
 
31
  import uvicorn
32
+ try:
33
+ from reportlab.lib.pagesizes import letter
34
+ from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image as ReportLabImage
35
+ from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
36
+ from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
37
+ from reportlab.lib.units import inch
38
+ from reportlab.lib.colors import navy, black
39
+ REPORTLAB_AVAILABLE = True
40
+ except ImportError:
41
+ REPORTLAB_AVAILABLE = False
42
+ from ultralytics import YOLO
43
  from sklearn.preprocessing import MinMaxScaler
44
  from model import MWT as create_model
45
  from augmentations import Augmentations
46
+ from huggingface_hub import InferenceClient
47
+
48
+ # =====================================================
49
+
50
+ # SETUP TEMP DIRS AND ENV
51
+
52
+ # =====================================================
53
+
54
+ for d in ["/tmp/huggingface", "/tmp/Ultralytics", "/tmp/matplotlib", "/tmp/torch"]:
55
+ shutil.rmtree(d, ignore_errors=True)
56
+
57
+ os.environ["HF_HOME"] = "/tmp/huggingface"
58
+ os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface"
59
+ os.environ["TORCH_HOME"] = "/tmp/torch"
60
+ os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
61
+ os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
62
 
63
+ # =====================================================
64
 
65
+ # HUGGING FACE CLIENT SETUP
66
 
67
  # =====================================================
68
+
69
+ HF_MODEL_ID = "mistralai/Mistral-7B-v0.1"
70
+ hf_token = os.getenv("HF_TOKEN")
71
+ client = None
72
+
73
+ if hf_token:
74
+ try:
75
+ client = InferenceClient(model=HF_MODEL_ID, token=hf_token)
76
+ print(f"✅ Hugging Face InferenceClient initialized for {HF_MODEL_ID}")
77
+ except Exception as e:
78
+ print("⚠️ Failed to initialize Hugging Face client:", e)
79
+ else:
80
+ print("⚠️ Warning: No HF_TOKEN found — summaries will be skipped.")
81
+
82
def generate_ai_summary(abnormal_cells, normal_cells, avg_confidence):
    """Generate a brief medical interpretation using Mistral.

    Parameters
    ----------
    abnormal_cells : int
        Count of cells the detector labelled abnormal.
    normal_cells : int
        Count of cells the detector labelled normal.
    avg_confidence : float
        Average detection confidence as a percentage (0-100).

    Returns
    -------
    str
        An AI-generated 2-3 sentence interpretation, or a deterministic
        fallback summary when the HF client is unavailable, no cells were
        detected, or the remote call fails.
    """
    if not client:
        return "⚠️ Hugging Face client not initialized — skipping summary."

    total = abnormal_cells + normal_cells
    # Guard explicitly: previously a zero total raised ZeroDivisionError
    # inside the prompt f-string and relied on the except block to return
    # this message. Check up front instead of using exceptions for control flow.
    if total == 0:
        return "No cells were detected in the sample. Consider re-scanning or adjusting detection parameters."

    try:
        prompt = f"""Act as a cytopathology expert providing a brief diagnostic interpretation.

Observed Cell Counts:
- {abnormal_cells} Abnormal Cells
- {normal_cells} Normal Cells
- Detection Confidence: {avg_confidence:.1f}%

Write a 2-3 sentence professional medical assessment focusing on:
1. Cell count analysis
2. Abnormality ratio ({abnormal_cells/(abnormal_cells + normal_cells)*100:.1f}%)
3. Clinical significance

Use objective, scientific language suitable for a pathology report."""

        response = client.text_generation(
            prompt,
            max_new_tokens=200,
            temperature=0.7,
            stream=False,
            details=True,
            stop_sequences=["\n\n", "###"]
        )

        # Different huggingface_hub versions return an object with
        # .generated_text, a dict, or a plain string — normalize all three.
        if hasattr(response, 'generated_text'):
            return response.generated_text.strip()
        elif isinstance(response, dict):
            return response.get('generated_text', '').strip()
        elif isinstance(response, str):
            return response.strip()

        # Fallback summary if response format is unexpected
        ratio = abnormal_cells / total * 100
        return f"Analysis shows {abnormal_cells} abnormal cells ({ratio:.1f}%) and {normal_cells} normal cells, with average detection confidence of {avg_confidence:.1f}%."

    except Exception:
        # Provide a structured fallback summary instead of an error message.
        ratio = (abnormal_cells / total) * 100
        severity = "high" if ratio > 70 else "moderate" if ratio > 30 else "low"
        return f"Quantitative analysis detected {abnormal_cells} abnormal cells ({ratio:.1f}%) among {total} total cells, indicating {severity} abnormality ratio. Average detection confidence: {avg_confidence:.1f}%."
134
+
135
+
136
def generate_mwt_summary(predicted_label, confidences, avg_confidence):
    """Generate a short MWT-specific interpretation using the HF client when available.

    Parameters
    ----------
    predicted_label : str
        Winning class name from the MWT classifier ("Negative"/"Positive").
    confidences : dict
        Class-name -> probability mapping (must be JSON-serializable).
    avg_confidence : float
        Primary confidence as a percentage (0-100).

    Returns
    -------
    str
        A 1-2 sentence AI interpretation, or a deterministic fallback when
        the client is missing or the remote call fails.
    """
    if not client:
        return "⚠️ Hugging Face client not initialized — skipping AI interpretation."

    try:
        prompt = f"""
You are a concise cytopathology expert. Given an MWT classifier result, write a 1-2 sentence professional interpretation suitable for embedding in a diagnostic report.

Result:
- Predicted label: {predicted_label}
- Confidence (average): {avg_confidence:.1f}%
- Class probabilities: {json.dumps(confidences)}

Provide guidance on the significance of the result and any suggested next steps in plain, objective language.
"""

        response = client.text_generation(
            prompt,
            max_new_tokens=120,
            temperature=0.2,
            stream=False,
            details=True,
            stop_sequences=["\n\n", "###"]
        )

        # Normalize the three response shapes different client versions return.
        if hasattr(response, 'generated_text'):
            return response.generated_text.strip()
        elif isinstance(response, dict):
            return response.get('generated_text', '').strip()
        elif isinstance(response, str):
            return response.strip()

        return f"Result: {predicted_label} (avg confidence {avg_confidence:.1f}%)."
    # Bare `except Exception:` — the exception detail was captured as `e`
    # before but never used; this also matches generate_cin_summary's style.
    except Exception:
        return f"Quantitative result: {predicted_label} with average confidence {avg_confidence:.1f}%."
172
+
173
+
174
def generate_cin_summary(predicted_grade, confidences, avg_confidence):
    """Produce a brief CIN-grade interpretation via the shared HF client.

    Falls back to a deterministic one-line summary when the client is not
    initialized or the remote text-generation call raises.
    """
    if not client:
        return "⚠️ Hugging Face client not initialized — skipping AI interpretation."

    try:
        request_prompt = f"""
You are a concise gynecologic pathology expert. Given a CIN classifier result, write a 1-2 sentence professional interpretation suitable for a diagnostic report.

Result:
- Predicted grade: {predicted_grade}
- Confidence (average): {avg_confidence:.1f}%
- Class probabilities: {json.dumps(confidences)}

Provide a brief statement about clinical significance and suggested next steps (e.g., further colposcopic evaluation) in objective, clinical language.
"""

        reply = client.text_generation(
            request_prompt,
            max_new_tokens=140,
            temperature=0.2,
            stream=False,
            details=True,
            stop_sequences=["\n\n", "###"]
        )

        # Client versions vary: the reply may be an object, a dict, or a string.
        if hasattr(reply, 'generated_text'):
            return reply.generated_text.strip()
        if isinstance(reply, dict):
            return reply.get('generated_text', '').strip()
        if isinstance(reply, str):
            return reply.strip()

        return f"Result: {predicted_grade} (avg confidence {avg_confidence:.1f}%)."
    except Exception:
        return f"Quantitative result: {predicted_grade} with average confidence {avg_confidence:.1f}%."
210
+
211
+
212
  # =====================================================
213
+
214
+ # FASTAPI SETUP
215
+
216
+ # =====================================================
217
+
218
+ app = FastAPI(title="Pathora Medical Diagnostic API")
219
 
220
  app.add_middleware(
221
  CORSMiddleware,
222
+ allow_origins=["*", "http://localhost:5173", "http://127.0.0.1:5173"],
223
  allow_credentials=True,
224
  allow_methods=["*"],
225
  allow_headers=["*"],
226
+ expose_headers=["*"] # Allow access to response headers
227
  )
228
 
229
+ OUTPUT_DIR = "outputs"
230
  os.makedirs(OUTPUT_DIR, exist_ok=True)
231
+
232
+ # Create image outputs dir
233
+ IMAGES_DIR = os.path.join(OUTPUT_DIR, "images")
234
+ os.makedirs(IMAGES_DIR, exist_ok=True)
235
+
236
  app.mount("/outputs", StaticFiles(directory=OUTPUT_DIR), name="outputs")
237
 
238
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
239
 
240
  # =====================================================
241
+
242
+ # MODEL LOADS
243
+
244
  # =====================================================
245
+
246
  print("🔹 Loading YOLO model...")
247
  yolo_model = YOLO("best2.pt")
248
 
 
 
 
249
  print("🔹 Loading MWT model...")
250
  mwt_model = create_model(num_classes=2).to(device)
251
  mwt_model.load_state_dict(torch.load("MWTclass2.pth", map_location=device))
252
  mwt_model.eval()
253
+ mwt_class_names = ["Negative", "Positive"]
254
 
 
 
 
255
  print("🔹 Loading CIN model...")
256
+ try:
257
+ clf = joblib.load("logistic_regression_model.pkl")
258
+ except Exception as e:
259
+ print(f"⚠️ CIN classifier not available (logistic_regression_model.pkl missing or invalid): {e}")
260
+ clf = None
261
+
262
  yolo_colposcopy = YOLO("yolo_colposcopy.pt")
263
 
264
+ # =====================================================
265
+
266
+ # RESNET FEATURE EXTRACTORS FOR CIN
267
+
268
+ # =====================================================
269
+
270
  def build_resnet(model_name="resnet50"):
271
  if model_name == "resnet50":
272
  model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
 
279
  nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool),
280
  model.layer1, model.layer2, model.layer3, model.layer4,
281
  )
 
282
  gap = nn.AdaptiveAvgPool2d((1, 1))
283
  gmp = nn.AdaptiveMaxPool2d((1, 1))
284
  resnet50_blocks = build_resnet("resnet50")
 
286
  resnet152_blocks = build_resnet("resnet152")
287
 
288
  transform = transforms.Compose([
289
+ transforms.ToPILImage(),
290
+ transforms.Resize((224, 224)),
291
+ transforms.ToTensor(),
292
+ transforms.Normalize(mean=[0.485, 0.456, 0.406],
293
+ std=[0.229, 0.224, 0.225]),
294
  ])
295
 
 
 
 
 
 
 
 
 
 
 
 
 
296
 
 
 
 
297
  def preprocess_for_mwt(image_np):
298
  img = cv2.resize(image_np, (224, 224))
299
  img = Augmentations.Normalization((0, 1))(img)
 
316
  p3 = gap(f3).view(-1)
317
  p4 = gap(f4).view(-1)
318
  p5 = gap(f5).view(-1)
319
+ return torch.cat([p1, p2, p3, p4, p5], dim=0).cpu().numpy()
320
+
321
+ # =====================================================
322
+ # Model 4: Histopathology Classifier (TensorFlow)
323
+ # =====================================================
324
+ print("🔹 Attempting to load Breast Cancer Histopathology model...")
325
+
326
+ try:
327
+ classifier = BreastCancerClassifier(fine_tune=False)
328
+
329
+ # Safely handle Hugging Face token auth
330
+ hf_token = os.getenv("HF_TOKEN")
331
+ if hf_token:
332
+ if classifier.authenticate_huggingface():
333
+ print(" Hugging Face authentication successful.")
334
+ else:
335
+ print("⚠️ Warning: Hugging Face authentication failed, using local model only.")
336
+ else:
337
+ print("⚠️ HF_TOKEN not found in environment — skipping authentication.")
338
+
339
+ # Load Path Foundation model
340
+ if classifier.load_path_foundation():
341
+ print("✅ Loaded Path Foundation base model.")
342
+ else:
343
+ print("⚠️ Could not load Path Foundation base model, continuing with local weights only.")
344
+
345
+ # Load trained histopathology model
346
+ model_path = "histopathology_trained_model.keras"
347
+ if os.path.exists(model_path):
348
+ classifier.model = tf.keras.models.load_model(model_path)
349
+ print(f"✅ Loaded local histopathology model: {model_path}")
350
+ else:
351
+ print(f"⚠️ Model file not found: {model_path}")
352
+
353
+ except Exception as e:
354
+ classifier = None
355
+ print(f"❌ Error initializing histopathology model: {e}")
356
+
357
def predict_histopathology(image):
    """Classify a PIL image as Benign/Malignant with the TF histopathology model.

    Mirrors the MWT/CIN response shape: a prediction label, a per-class
    confidence dict, and a short summary. Returns an error dict when the
    module-level ``classifier`` failed to initialize or inference raises.
    """
    if classifier is None:
        return {"error": "Histopathology model not available."}

    try:
        # Ensure a 224x224 RGB input scaled to [0, 1] with a batch axis.
        rgb = image if image.mode == "RGB" else image.convert("RGB")
        rgb = rgb.resize((224, 224))
        batch = np.expand_dims(np.asarray(rgb, dtype="float32") / 255.0, axis=0)

        feats = classifier.extract_embeddings(batch)
        probs = classifier.model.predict(feats, verbose=0)[0]

        class_names = ["Benign", "Malignant"]
        winner = int(np.argmax(probs))
        confidences = {name: float(p) for name, p in zip(class_names, probs)}
        avg_confidence = float(np.max(probs)) * 100

        return {
            "model_used": "Histopathology Classifier",
            "prediction": class_names[winner],
            "confidence": confidences,
            "summary": {
                "avg_confidence": round(avg_confidence, 2),
                "ai_interpretation": f"Histopathological analysis indicates {class_names[winner].lower()} tissue with {avg_confidence:.1f}% confidence.",
            },
        }
    except Exception as e:
        return {"error": f"Histopathology prediction failed: {e}"}
386
+
387
 
388
  # =====================================================
389
+
390
+ # MAIN ENDPOINT
391
+
392
  # =====================================================
393
+
394
+
395
  @app.post("/predict/")
396
  async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
397
+ print(f"Received prediction request - model: {model_name}, file: {file.filename}")
398
+
399
+ # Validate model name
400
+ if model_name not in ["yolo", "mwt", "cin", "histopathology"]:
401
+ return JSONResponse(
402
+ content={
403
+ "error": f"Invalid model_name: {model_name}. Must be one of: yolo, mwt, cin, histopathology"
404
+ },
405
+ status_code=400
406
+ )
407
+
408
+ # Validate and read file
409
+ if not file.filename:
410
+ return JSONResponse(
411
+ content={"error": "No file provided"},
412
+ status_code=400
413
+ )
414
+
415
  contents = await file.read()
416
+ if len(contents) == 0:
417
+ return JSONResponse(
418
+ content={"error": "Empty file provided"},
419
+ status_code=400
420
+ )
421
+
422
+ # Attempt to open and validate image
423
+ try:
424
+ image = PILImage.open(BytesIO(contents)).convert("RGB")
425
+ image_np = np.array(image)
426
+ if image_np.size == 0:
427
+ raise ValueError("Empty image array")
428
+ print(f"Successfully loaded image, shape: {image_np.shape}")
429
+ except Exception as e:
430
+ return JSONResponse(
431
+ content={"error": f"Invalid image file: {str(e)}"},
432
+ status_code=400
433
+ )
434
 
435
  if model_name == "yolo":
436
  results = yolo_model(image)
437
  detections_json = results[0].to_json()
438
  detections = json.loads(detections_json)
439
+
440
+ abnormal_cells = sum(1 for d in detections if d["name"] == "abnormal")
441
+ normal_cells = sum(1 for d in detections if d["name"] == "normal")
442
+ avg_confidence = np.mean([d.get("confidence", 0) for d in detections]) * 100 if detections else 0
443
+
444
+ ai_summary = generate_ai_summary(abnormal_cells, normal_cells, avg_confidence)
445
+
446
  output_filename = f"detected_{uuid.uuid4().hex[:8]}.jpg"
447
+ output_path = os.path.join(IMAGES_DIR, output_filename)
448
  results[0].save(filename=output_path)
449
+
450
  return {
451
  "model_used": "YOLO Detection",
452
  "detections": detections,
453
+ "annotated_image_url": f"/outputs/images/{output_filename}",
454
+ "summary": {
455
+ "abnormal_cells": abnormal_cells,
456
+ "normal_cells": normal_cells,
457
+ "avg_confidence": round(float(avg_confidence), 2),
458
+ "ai_interpretation": ai_summary,
459
+ },
460
  }
461
 
462
  elif model_name == "mwt":
 
465
  output = mwt_model(tensor.to(device)).cpu()
466
  probs = torch.softmax(output, dim=1)[0]
467
  confidences = {mwt_class_names[i]: float(probs[i]) for i in range(2)}
468
+ predicted_label = mwt_class_names[int(torch.argmax(probs).item())]
469
+ # Average / primary confidence for display
470
+ avg_confidence = float(torch.max(probs).item()) * 100
471
+
472
+ # Generate a brief AI interpretation using the Mistral client (if available)
473
+ ai_interp = generate_mwt_summary(predicted_label, confidences, avg_confidence)
474
+
475
+ return {
476
+ "model_used": "MWT Classifier",
477
+ "prediction": predicted_label,
478
+ "confidence": confidences,
479
+ "summary": {
480
+ "avg_confidence": round(avg_confidence, 2),
481
+ "ai_interpretation": ai_interp,
482
+ },
483
+ }
484
 
485
  elif model_name == "cin":
486
+ if clf is None:
487
+ return JSONResponse(
488
+ content={"error": "CIN classifier not available on server."},
489
+ status_code=503,
490
+ )
491
  nparr = np.frombuffer(contents, np.uint8)
492
  img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
493
  results = yolo_colposcopy.predict(source=img, conf=0.7, save=False, verbose=False)
494
  if len(results[0].boxes) == 0:
495
  return {"error": "No cervix detected"}
496
+
497
  x1, y1, x2, y2 = map(int, results[0].boxes.xyxy[0].cpu().numpy())
498
  crop = img[y1:y2, x1:x2]
499
  crop = cv2.resize(crop, (224, 224))
 
505
  X_scaled = MinMaxScaler().fit_transform(features)
506
  pred = clf.predict(X_scaled)[0]
507
  proba = clf.predict_proba(X_scaled)[0]
508
+ # Get actual number of classes from model output
509
+ classes = ["Low-grade", "High-grade"] # Binary CIN classification
510
  predicted_label = classes[pred]
511
+ confidences = {classes[i]: float(proba[i]) for i in range(len(classes))}
512
+
513
+ # Map to more detailed classification based on confidence
514
+ if predicted_label == "High-grade" and confidences["High-grade"] > 0.8:
515
+ detailed_class = "CIN3"
516
+ elif predicted_label == "High-grade":
517
+ detailed_class = "CIN2"
518
+ else:
519
+ detailed_class = "CIN1"
520
+
521
+ # Average / primary confidence for display
522
+ avg_confidence = float(np.max(proba)) * 100
523
+
524
+ # Generate a brief AI interpretation using the Mistral client (if available)
525
+ ai_interp = generate_cin_summary(predicted_label, confidences, avg_confidence)
526
+
527
  return {
528
  "model_used": "CIN Classifier",
529
+ "prediction": detailed_class,
530
+ "grade": predicted_label,
531
+ "confidence": confidences,
532
+ "summary": {
533
+ "avg_confidence": round(avg_confidence, 2),
534
+ "ai_interpretation": ai_interp,
535
+ },
536
  }
 
 
537
  elif model_name == "histopathology":
538
+ result = predict_histopathology(image)
539
+ return result
540
+
541
 
542
  else:
543
  return JSONResponse(content={"error": "Invalid model name"}, status_code=400)
544
 
545
+ # =====================================================
546
+
547
+ # ROUTES
548
+
549
+ # =====================================================
550
+
551
def create_designed_pdf(pdf_path, report_data, analysis_summary_json):
    """Render a medical analysis report as a styled PDF using reportlab.

    Args:
        pdf_path: Destination file path for the generated PDF.
        report_data: Dict built by the /reports/ endpoint; must contain
            'patient', and may contain 'report_id', 'analysis' and
            'doctor_notes'.
        analysis_summary_json: JSON string with the AI summary
            (model_used, avg_confidence, confidence, ai_interpretation, ...);
            may be None/empty or malformed — parsed defensively.
    """
    doc = SimpleDocTemplate(pdf_path, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    styles = getSampleStyleSheet()
    story = []

    # Bug fix: getSampleStyleSheet() already defines a 'Title' style and
    # StyleSheet1.add() raises KeyError on duplicate names, which previously
    # made every PDF build fail (and the caller silently skipped the PDF).
    # Use a report-specific name instead of 'Title'.
    styles.add(ParagraphStyle(name='ReportTitle', fontSize=20, fontName='Helvetica-Bold', alignment=TA_CENTER, textColor=navy))
    styles.add(ParagraphStyle(name='Section', fontSize=14, fontName='Helvetica-Bold', spaceBefore=10, spaceAfter=6))
    styles.add(ParagraphStyle(name='NormalSmall', fontSize=10, leading=12))
    styles.add(ParagraphStyle(name='Heading', fontSize=16, fontName='Helvetica-Bold', textColor=navy, spaceBefore=6, spaceAfter=4))

    patient = report_data['patient']
    analysis = report_data.get('analysis', {})

    # Safely parse analysis_summary_json — a bad payload must not break PDF output.
    try:
        ai_summary = json.loads(analysis_summary_json) if analysis_summary_json else {}
    except (json.JSONDecodeError, TypeError):
        ai_summary = {}

    # Determine report type based on model used (falls back to cytology).
    model_used = ai_summary.get('model_used', '')
    if 'YOLO' in model_used or 'yolo' in str(analysis.get('id', '')).lower():
        report_type = "CYTOLOGY"
        report_title = "Cytology Report"
    elif 'CIN' in model_used or 'cin' in str(analysis.get('id', '')).lower() or 'colpo' in str(analysis.get('id', '')).lower():
        report_type = "COLPOSCOPY"
        report_title = "Colposcopy Report"
    elif 'histo' in str(analysis.get('id', '')).lower() or 'histopathology' in model_used.lower():
        report_type = "HISTOPATHOLOGY"
        report_title = "Histopathology Report"
    else:
        report_type = "CYTOLOGY"
        report_title = "Medical Analysis Report"

    # Header
    story.append(Paragraph("MANALIFE AI", styles['ReportTitle']))
    story.append(Paragraph("Advanced Medical Analysis", styles['NormalSmall']))
    story.append(Spacer(1, 0.3*inch))
    story.append(Paragraph(f"MEDICAL ANALYSIS REPORT OF {report_type}", styles['Heading']))
    story.append(Paragraph(report_title, styles['Section']))
    story.append(Spacer(1, 0.2*inch))

    # Report ID and Date
    story.append(Paragraph(f"<b>Report ID:</b> {report_data.get('report_id', 'N/A')}", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Generated:</b> {datetime.datetime.now().strftime('%b %d, %Y, %I:%M %p')}", styles['NormalSmall']))
    story.append(Spacer(1, 0.2*inch))

    # Patient Information Section
    story.append(Paragraph("Patient Information", styles['Section']))
    story.append(Paragraph(f"<b>Patient ID:</b> {patient.get('id', 'N/A')}", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Exam Date:</b> {patient.get('exam_date', 'N/A')}", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Physician:</b> {patient.get('physician', 'N/A')}", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Facility:</b> {patient.get('facility', 'N/A')}", styles['NormalSmall']))
    story.append(Spacer(1, 0.2*inch))

    # Sample Information Section
    story.append(Paragraph("Sample Information", styles['Section']))
    story.append(Paragraph(f"<b>Specimen Type:</b> {patient.get('specimen_type', 'Cervical Cytology')}", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Clinical History:</b> {patient.get('clinical_history', 'N/A')}", styles['NormalSmall']))
    story.append(Spacer(1, 0.2*inch))

    # AI Analysis Section
    story.append(Paragraph("AI-ASSISTED ANALYSIS", styles['Section']))
    story.append(Paragraph("<b>System:</b> Manalife AI System — Automated Analysis", styles['NormalSmall']))
    story.append(Paragraph(f"<b>Confidence Score:</b> {ai_summary.get('avg_confidence', 'N/A')}%", styles['NormalSmall']))

    # Add metrics based on report type
    if report_type == "HISTOPATHOLOGY":
        # For histopathology, show Benign/Malignant confidence
        confidence_dict = ai_summary.get('confidence', {})
        if isinstance(confidence_dict, dict):
            benign_conf = confidence_dict.get('Benign', 0) * 100
            malignant_conf = confidence_dict.get('Malignant', 0) * 100
            story.append(Paragraph(f"<b>Benign Confidence:</b> {benign_conf:.2f}%", styles['NormalSmall']))
            story.append(Paragraph(f"<b>Malignant Confidence:</b> {malignant_conf:.2f}%", styles['NormalSmall']))
    elif report_type == "CYTOLOGY":
        # For cytology (YOLO), show abnormal/normal cells
        if 'abnormal_cells' in ai_summary:
            story.append(Paragraph(f"<b>Abnormal Cells:</b> {ai_summary.get('abnormal_cells', 'N/A')}", styles['NormalSmall']))
        if 'normal_cells' in ai_summary:
            story.append(Paragraph(f"<b>Normal Cells:</b> {ai_summary.get('normal_cells', 'N/A')}", styles['NormalSmall']))
    else:
        # For CIN/Colposcopy, show class confidences
        confidence_dict = ai_summary.get('confidence', {})
        if isinstance(confidence_dict, dict):
            for cls, val in confidence_dict.items():
                conf_pct = val * 100 if isinstance(val, (int, float)) else 0
                story.append(Paragraph(f"<b>{cls} Confidence:</b> {conf_pct:.2f}%", styles['NormalSmall']))

    story.append(Spacer(1, 0.1*inch))
    story.append(Paragraph("<b>AI Interpretation:</b>", styles['NormalSmall']))
    story.append(Paragraph(ai_summary.get('ai_interpretation', 'Not available.'), styles['NormalSmall']))
    story.append(Spacer(1, 0.2*inch))

    # Doctor's Notes
    story.append(Paragraph("Doctor's Notes", styles['Section']))
    story.append(Paragraph(report_data.get('doctor_notes') or 'No additional notes provided.', styles['NormalSmall']))
    story.append(Spacer(1, 0.2*inch))

    # Recommendations
    story.append(Paragraph("RECOMMENDATIONS", styles['Section']))
    story.append(Paragraph("Continue routine screening as per standard guidelines. Follow up as directed by your physician.", styles['NormalSmall']))
    story.append(Spacer(1, 0.3*inch))

    # Signatures
    story.append(Paragraph("Signatures", styles['Section']))
    story.append(Paragraph("Dr. Emily Roberts, MD (Cytopathologist)", styles['NormalSmall']))
    story.append(Paragraph("Dr. James Wilson, MD (Pathologist)", styles['NormalSmall']))
    story.append(Spacer(1, 0.1*inch))
    story.append(Paragraph(f"Generated on: {datetime.datetime.now().strftime('%b %d, %Y, %I:%M %p')}", styles['NormalSmall']))

    doc.build(story)
665
+
666
+
667
+
668
@app.post("/reports/")
async def generate_report(
    patient_id: str = Form(...),
    exam_date: str = Form(...),
    metadata: str = Form(...),
    notes: str = Form(None),
    analysis_id: str = Form(None),
    analysis_summary: str = Form(None),
):
    """Generate a structured medical report from analysis results and metadata.

    Writes report.json (always), report.pdf (when reportlab is installed) and
    report.html (always) under OUTPUT_DIR/reports/<report_id>/ and returns the
    public URLs of those artifacts.

    Form fields:
        patient_id: Caller-supplied patient identifier; also prefixes report_id.
        exam_date: Examination date string (stored verbatim, not validated).
        metadata: JSON object string; its keys are merged into the patient record.
        notes: Optional free-text doctor's notes.
        analysis_id: Optional analysis identifier — NOTE(review): this same value
            is also treated as an annotated-image URL below; confirm callers pass
            a URL here when an image should be embedded.
        analysis_summary: Optional JSON string with AI results
            (model_used, avg_confidence, confidence, ai_interpretation, ...).
    """
    try:
        # Create reports directory if it doesn't exist
        reports_dir = os.path.join(OUTPUT_DIR, "reports")
        os.makedirs(reports_dir, exist_ok=True)

        # Generate unique report ID (patient id + 8 random hex chars)
        report_id = f"{patient_id}_{uuid.uuid4().hex[:8]}"
        report_dir = os.path.join(reports_dir, report_id)
        os.makedirs(report_dir, exist_ok=True)

        # Parse metadata (raises on invalid JSON; caught by the outer handler)
        metadata_dict = json.loads(metadata)

        # Get analysis results - assuming stored in memory or retrievable
        # TODO: Implement analysis results storage/retrieval

        # Construct report data
        report_data = {
            "report_id": report_id,
            "generated_at": datetime.datetime.now().isoformat(),
            "patient": {
                "id": patient_id,
                "exam_date": exam_date,
                **metadata_dict
            },
            "analysis": {
                "id": analysis_id,
                # If the analysis_id is actually an annotated image URL, store it for report embedding
                "annotated_image_url": analysis_id,
                # TODO: Add actual analysis results
            },
            "doctor_notes": notes
        }

        # Save report data
        report_json = os.path.join(report_dir, "report.json")
        with open(report_json, "w", encoding="utf-8") as f:
            json.dump(report_data, f, indent=2, ensure_ascii=False)

        # Attempt to create a PDF version if reportlab is available;
        # PDF failure is non-fatal — the HTML report is still produced.
        pdf_url = None
        if REPORTLAB_AVAILABLE:
            try:
                pdf_path = os.path.join(report_dir, "report.pdf")
                create_designed_pdf(pdf_path, report_data, analysis_summary)
                pdf_url = f"/outputs/reports/{report_id}/report.pdf"
            except Exception as e:
                print(f"Error creating designed PDF: {e}")
                pdf_url = None

        # Parse analysis_summary to get AI results (malformed input -> empty dict)
        try:
            ai_summary = json.loads(analysis_summary) if analysis_summary else {}
        except (json.JSONDecodeError, TypeError):
            ai_summary = {}

        # Determine report type based on analysis summary or model used.
        # NOTE(review): this duplicates the heuristic in create_designed_pdf;
        # keep the two in sync if the model names change.
        model_used = ai_summary.get('model_used', '')
        if 'YOLO' in model_used or 'yolo' in str(analysis_id).lower():
            report_type = "Cytology"
            report_title = "Cytology Report"
        elif 'CIN' in model_used or 'cin' in str(analysis_id).lower() or 'colpo' in str(analysis_id).lower():
            report_type = "Colposcopy"
            report_title = "Colposcopy Report"
        elif 'histo' in str(analysis_id).lower() or 'histopathology' in model_used.lower():
            report_type = "Histopathology"
            report_title = "Histopathology Report"
        else:
            # Default fallback
            report_type = "Cytology"
            report_title = "Medical Analysis Report"

        # Build analysis metrics HTML based on report type
        if report_type == "Histopathology":
            # For histopathology, show Benign/Malignant confidence from the confidence dict
            confidence_dict = ai_summary.get('confidence', {})
            benign_conf = confidence_dict.get('Benign', 0) * 100 if isinstance(confidence_dict, dict) else 0
            malignant_conf = confidence_dict.get('Malignant', 0) * 100 if isinstance(confidence_dict, dict) else 0

            analysis_metrics_html = f"""
            <tr><th>System</th><td>Manalife AI System — Automated Analysis</td></tr>
            <tr><th>Confidence Score</th><td>{ai_summary.get('avg_confidence', 'N/A')}%</td></tr>
            <tr><th>Benign Confidence</th><td>{benign_conf:.2f}%</td></tr>
            <tr><th>Malignant Confidence</th><td>{malignant_conf:.2f}%</td></tr>
            """
        elif report_type == "Cytology":
            # For cytology (YOLO), show abnormal/normal cells
            analysis_metrics_html = f"""
            <tr><th>System</th><td>Manalife AI System — Automated Analysis</td></tr>
            <tr><th>Confidence Score</th><td>{ai_summary.get('avg_confidence', 'N/A')}%</td></tr>
            <tr><th>Abnormal Cells</th><td>{ai_summary.get('abnormal_cells', 'N/A')}</td></tr>
            <tr><th>Normal Cells</th><td>{ai_summary.get('normal_cells', 'N/A')}</td></tr>
            """
        else:
            # For CIN/Colposcopy or other models, show generic confidence
            confidence_dict = ai_summary.get('confidence', {})
            confidence_rows = ""
            if isinstance(confidence_dict, dict):
                for cls, val in confidence_dict.items():
                    conf_pct = val * 100 if isinstance(val, (int, float)) else 0
                    confidence_rows += f"<tr><th>{cls} Confidence</th><td>{conf_pct:.2f}%</td></tr>\n            "

            analysis_metrics_html = f"""
            <tr><th>System</th><td>Manalife AI System — Automated Analysis</td></tr>
            <tr><th>Confidence Score</th><td>{ai_summary.get('avg_confidence', 'N/A')}%</td></tr>
            {confidence_rows}
            """

        # Build final HTML including download links and embedded annotated image
        report_html = os.path.join(report_dir, "report.html")
        json_url = f"/outputs/reports/{report_id}/report.json"
        html_url = f"/outputs/reports/{report_id}/report.html"
        annotated_img = report_data.get("analysis", {}).get("annotated_image_url") or ""

        # Get base URL for the annotated image (if it's a relative path)
        # NOTE(review): hardcoded localhost base only works in local dev —
        # behind a deployed host the embedded image URL will be wrong; confirm.
        annotated_img_full = f"http://localhost:8000{annotated_img}" if annotated_img and annotated_img.startswith('/') else annotated_img

        download_pdf_btn = f'<a href="{pdf_url}" download style="text-decoration:none"><button class="btn-secondary">Download PDF</button></a>' if pdf_url else ''

        # Format generated time
        generated_time = datetime.datetime.now().strftime('%b %d, %Y, %I:%M %p')

        html_content = f"""<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>Medical Analysis Report — Manalife AI</title>
<style>
:root{{--bg:#f8fafc;--card:#ffffff;--muted:#6b7280;--accent:#0f172a}}
body{{font-family:Inter,ui-sans-serif,system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial;margin:0;background:var(--bg);color:var(--accent);line-height:1.45}}
.container{{max-width:900px;margin:36px auto;padding:20px}}
header{{display:flex;align-items:center;gap:16px}}
.brand{{display:flex;flex-direction:column}}
h1{{margin:0;font-size:20px}}
.sub{{color:var(--muted);font-size:13px}}
.card{{background:var(--card);box-shadow:0 6px 18px rgba(15,23,42,0.06);border-radius:12px;padding:20px;margin-top:18px}}
.grid{{display:grid;grid-template-columns:1fr 1fr;gap:12px}}
.section-title{{font-weight:600;margin-top:8px}}
table{{width:100%;border-collapse:collapse;margin-top:8px}}
td,th{{padding:8px;border-bottom:1px dashed #e6e9ef;text-align:left;font-size:14px}}
.full{{grid-column:1/-1}}
.muted{{color:var(--muted);font-size:13px}}
.footer{{margin-top:20px;font-size:13px;color:var(--muted)}}
.pill{{background:#eef2ff;color:#1e3a8a;padding:6px 10px;border-radius:999px;font-weight:600;font-size:13px}}
@media (max-width:700px){{.grid{{grid-template-columns:1fr}}}}
.signatures{{display:flex;gap:20px;flex-wrap:wrap;margin-top:12px}}
.sig{{background:#fbfbfd;border:1px solid #f0f1f5;padding:10px;border-radius:8px;min-width:180px}}
.annotated-image{{max-width:100%;height:auto;border-radius:8px;margin-top:12px;border:1px solid #e6e9ef}}
.btn-primary{{padding:10px 14px;border-radius:8px;border:1px solid #2563eb;background:#2563eb;color:white;font-weight:700;cursor:pointer}}
.btn-secondary{{padding:10px 14px;border-radius:8px;border:1px solid #e6eefc;background:#eef2ff;font-weight:700;cursor:pointer}}
.actions-bar{{margin-top:12px;display:flex;gap:8px;flex-wrap:wrap}}
</style>
</head>
<body>
<div class="container">
<header>
<div>
<img src="data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='64' height='64'><rect rx='10' width='64' height='64' fill='%230f172a'/><text x='50%' y='55%' font-size='20' fill='white' text-anchor='middle' font-family='Arial'>M</text></svg>" alt="logo" width="64" height="64">
</div>
<div class="brand">
<h1>MANALIFE AI — Medical Analysis</h1>
<div class="sub">Advanced cytological colposcopy and histopathology reporting</div>
<div class="muted">contact@manalife.ai • +1 (555) 123-4567</div>
</div>
</header>

<div class="card">
<div style="display:flex;justify-content:space-between;align-items:center;gap:12px;flex-wrap:wrap">
<div>
<div class="muted">MEDICAL ANALYSIS REPORT OF {report_type.upper()}</div>
<h2 style="margin:6px 0 0 0">{report_title}</h2>
</div>
<div style="text-align:right">
<div class="pill">Report ID: {report_id}</div>
<div class="muted" style="margin-top:6px">Generated: {generated_time}</div>
</div>
</div>

<hr style="border:none;border-top:1px solid #eef2f6;margin:16px 0">

<div class="grid">
<div>
<div class="section-title">Patient Information</div>
<table>
<tr><th>Patient ID</th><td>{patient_id}</td></tr>
<tr><th>Exam Date</th><td>{exam_date}</td></tr>
<tr><th>Physician</th><td>{metadata_dict.get('physician', 'N/A')}</td></tr>
<tr><th>Facility</th><td>{metadata_dict.get('facility', 'N/A')}</td></tr>
</table>
</div>

<div>
<div class="section-title">Sample Information</div>
<table>
<tr><th>Specimen Type</th><td>{metadata_dict.get('specimen_type', 'N/A')}</td></tr>
<tr><th>Clinical History</th><td>{metadata_dict.get('clinical_history', 'N/A')}</td></tr>
<tr><th>Collected</th><td>{exam_date}</td></tr>
<tr><th>Reported</th><td>{generated_time}</td></tr>
</table>
</div>

<div class="full">
<div class="section-title">AI-Assisted Analysis</div>
<table>
{analysis_metrics_html}
</table>
<div style="margin-top:12px;padding:12px;background:#f8fafc;border-radius:8px;border-left:4px solid #2563eb">
<div style="font-weight:600;margin-bottom:6px">AI Interpretation:</div>
<div class="muted">{ai_summary.get('ai_interpretation', 'No AI interpretation available.')}</div>
</div>
</div>

{'<div class="full"><div class="section-title">Annotated Analysis Image</div><img src="' + annotated_img_full + '" class="annotated-image" alt="Annotated Analysis Result" /></div>' if annotated_img else ''}

<div class="full">
<div class="section-title">Doctor\'s Notes</div>
<p class="muted">{notes or 'No additional notes provided.'}</p>
</div>

<div class="full">
<div class="section-title">Recommendations</div>
<p class="muted">Continue routine screening as per standard guidelines. Follow up as directed by your physician.</p>
</div>

<div class="full">
<div class="section-title">Signatures</div>
<div class="signatures">
<div class="sig">
<div style="font-weight:700">Dr. Emily Roberts</div>
<div class="muted">MD, pathologist</div>
</div>
<div class="sig">
<div style="font-weight:700">Dr. James Wilson</div>
<div class="muted">MD, pathologist</div>
</div>
</div>
</div>
</div>

<div class="footer">
<div>AI System: Manalife AI — Automated Analysis</div>
<div style="margin-top:6px">Report generated: {report_data['generated_at']}</div>
</div>
</div>

<div class="actions-bar">
{download_pdf_btn}
<button class="btn-secondary" onclick="window.print()">Print Report</button>
</div>
</div>
</body>
</html>"""

        with open(report_html, "w", encoding="utf-8") as f:
            f.write(html_content)

        return {
            "report_id": report_id,
            "json_url": json_url,
            "html_url": html_url,
            "pdf_url": pdf_url,
        }

    except Exception as e:
        # Top-level boundary: any failure is reported as a 500 with the message.
        return JSONResponse(
            content={"error": f"Failed to generate report: {str(e)}"},
            status_code=500
        )
947
+
948
@app.get("/reports/{report_id}")
async def get_report(report_id: str):
    """Fetch a generated report's JSON payload by ID.

    Returns the stored report.json contents, or a 404 JSON error when the
    report does not exist or the ID is not a valid report identifier.
    """
    reports_root = os.path.realpath(os.path.join(OUTPUT_DIR, "reports"))
    # Security hardening: report_id comes straight from the URL. Resolve the
    # path and require it to stay inside the reports directory so values like
    # ".." or encoded separators cannot read files outside OUTPUT_DIR.
    report_dir = os.path.realpath(os.path.join(reports_root, report_id))
    if not report_dir.startswith(reports_root + os.sep):
        return JSONResponse(
            content={"error": "Report not found"},
            status_code=404
        )

    report_json = os.path.join(report_dir, "report.json")
    if not os.path.exists(report_json):
        return JSONResponse(
            content={"error": "Report not found"},
            status_code=404
        )

    # Reports are written as UTF-8; read them back the same way.
    with open(report_json, "r", encoding="utf-8") as f:
        report_data = json.load(f)

    return report_data
964
+
965
@app.get("/reports")
async def list_reports(patient_id: str = None):
    """List all generated reports, optionally filtered by patient ID.

    Returns {"reports": [...]} sorted newest-first by generation time.
    Robustness fix: a single corrupt or half-written report.json (or one
    missing expected keys) previously raised and turned the whole listing
    into a 500 — such entries are now skipped instead.
    """
    reports_dir = os.path.join(OUTPUT_DIR, "reports")
    if not os.path.exists(reports_dir):
        return {"reports": []}

    reports = []
    for report_id in os.listdir(reports_dir):
        report_json = os.path.join(reports_dir, report_id, "report.json")
        if not os.path.exists(report_json):
            continue
        try:
            with open(report_json, "r", encoding="utf-8") as f:
                report_data = json.load(f)
        except (json.JSONDecodeError, OSError):
            # Skip unreadable/corrupt entries rather than failing the request.
            continue
        patient = report_data.get("patient", {})
        if not patient_id or patient.get("id") == patient_id:
            reports.append({
                "report_id": report_id,
                "patient_id": patient.get("id"),
                "exam_date": patient.get("exam_date"),
                "generated_at": report_data.get("generated_at", ""),
            })

    return {"reports": sorted(reports, key=lambda r: r["generated_at"], reverse=True)}
987
 
988
  @app.get("/models")
989
  def get_models():
990
  return {"available_models": ["yolo", "mwt", "cin", "histopathology"]}
991
 
 
992
  @app.get("/health")
993
  def health():
994
+ return {"message": "Pathora Medical Diagnostic API is running!"}
995
# =====================================================
# FRONTEND
# =====================================================
 
 
 
 
 
1000
 
1001
# Serve frontend only if it has been built; avoid startup failure when dist/ is missing.
# The compiled SPA is expected at ../frontend/dist relative to this file.
FRONTEND_DIST = os.path.abspath(os.path.join(os.path.dirname(__file__), "../frontend/dist"))
ASSETS_DIR = os.path.join(FRONTEND_DIST, "assets")

# Mount static assets only when the build output exists, so a backend-only
# deployment still starts cleanly instead of raising at import time.
if os.path.isdir(ASSETS_DIR):
    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")
else:
    print("ℹ️ Frontend assets directory not found — skipping /assets mount.")
1009
 
1010
  @app.get("/")
1011
  async def serve_frontend():
1012
+ index_path = os.path.join(FRONTEND_DIST, "index.html")
1013
+ if os.path.isfile(index_path):
1014
+ return FileResponse(index_path)
1015
+ return JSONResponse({"message": "Backend is running. Frontend build not found."})
1016
 
1017
  if __name__ == "__main__":
1018
+ # Use PORT provided by the environment (Hugging Face Spaces sets PORT=7860)
1019
+ port = int(os.environ.get("PORT", 7860))
1020
+ uvicorn.run(app, host="0.0.0.0", port=port)