Upload 40 files

- Dockerfile +25 -0
- api/__init__.py +0 -0
- api/__pycache__/__init__.cpython-312.pyc +0 -0
- api/__pycache__/api_blueprint.cpython-312.pyc +0 -0
- api/__pycache__/utils.cpython-312.pyc +0 -0
- api/api_blueprint.py +784 -0
- api/utils.py +1328 -0
- app.py +73 -0
- constants.py +126 -0
- handle.py +124 -0
- index-Bv-pE24x.js +0 -0
- migrations/script.py.mako +26 -0
- models/__init__.py +0 -0
- models/__pycache__/__init__.cpython-312.pyc +0 -0
- models/__pycache__/application_session.cpython-312.pyc +0 -0
- models/__pycache__/base.cpython-312.pyc +0 -0
- models/__pycache__/combined_labels.cpython-312.pyc +0 -0
- models/application_session.py +24 -0
- models/base.py +7 -0
- models/combined_labels.py +22 -0
- requirements.txt +35 -0
- services/__init__.py +0 -0
- services/__pycache__/__init__.cpython-312.pyc +0 -0
- services/__pycache__/auto_segmentor.cpython-312.pyc +0 -0
- services/__pycache__/nifti_processor.cpython-312.pyc +0 -0
- services/__pycache__/npz_processor.cpython-312.pyc +0 -0
- services/__pycache__/session_manager.cpython-312.pyc +0 -0
- services/auto_segmentor.py +95 -0
- services/nifti_processor.py +214 -0
- services/npz_processor.py +229 -0
- services/session_manager.py +176 -0
- tests/__init__.py +0 -0
- tests/functional/__init__.py +0 -0
- tests/unit/__init__.py +0 -0
- tests/unit/test_app_session_model.py +132 -0
- tests/unit/test_combined_labels.py +77 -0
- tests/unit/test_nifti_combine.py +156 -0
- tests/unit/test_nifti_processing.py +111 -0
- tests/unit/test_scheduled_check.py +58 -0
- utils.py +14 -0
Dockerfile
ADDED
@@ -0,0 +1,25 @@
FROM python:3.9

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

ENV BASE_PATH=${BASE_PATH}
ENV SESSIONS_DIR_PATH=${SESSIONS_DIR_PATH}

ENV DB_USER=${DB_USER}
ENV DB_PASS=${DB_PASS}
ENV DB_HOST=${DB_HOST}
ENV DB_NAME=${DB_NAME}

EXPOSE 7860

CMD ["gunicorn", "-w", "2", "app:app", "--bind", "0.0.0.0:7860"]
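Note that `ENV BASE_PATH=${BASE_PATH}` only substitutes a value at build time if a matching `ARG` is declared (or the variable is passed at `docker run`); Docker does not read the host shell automatically. A minimal sketch of how the Flask side presumably consumes these variables — the fallback defaults and the postgresql:// scheme are assumptions, not from this commit:

import os

BASE_PATH = os.getenv("BASE_PATH", "")                      # forwarded by the Dockerfile above
SESSIONS_DIR_PATH = os.getenv("SESSIONS_DIR_PATH", "/tmp/sessions")
db_uri = (
    f"postgresql://{os.getenv('DB_USER')}:{os.getenv('DB_PASS')}"
    f"@{os.getenv('DB_HOST')}/{os.getenv('DB_NAME')}"
)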
api/__init__.py
ADDED
File without changes

api/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (168 Bytes)

api/__pycache__/api_blueprint.cpython-312.pyc
ADDED
Binary file (37.4 kB)

api/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (70 kB)
api/api_blueprint.py
ADDED
@@ -0,0 +1,784 @@
from flask import Blueprint, send_file, make_response, request, jsonify, Response
from services.nifti_processor import NiftiProcessor
from services.session_manager import SessionManager, generate_uuid
from services.auto_segmentor import run_auto_segmentation
from models.application_session import ApplicationSession
from models.combined_labels import CombinedLabels
from models.base import db
from constants import Constants
import zipfile
import pandas as pd

from pathlib import Path
from io import BytesIO
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import cm

from sqlalchemy.orm import aliased
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
import nibabel as nib
import uuid

from datetime import datetime, timedelta
from .utils import *
import requests  # ⭐ import requests only once, here

# Create the blueprint
api_blueprint = Blueprint("api", __name__)
last_session_check = datetime.now()

progress_tracker = {}  # {session_id: (start_time, expected_total_seconds)}
@api_blueprint.route("/proxy-image")
|
| 39 |
+
def proxy_image():
|
| 40 |
+
"""
|
| 41 |
+
Proxy image requests so the browser only talks to our own origin.
|
| 42 |
+
Front-end will call: /api/proxy-image?url=<encoded_hf_url>
|
| 43 |
+
"""
|
| 44 |
+
raw_url = request.args.get("url")
|
| 45 |
+
if not raw_url:
|
| 46 |
+
return Response("Missing url parameter", status=400)
|
| 47 |
+
|
| 48 |
+
# 可選安全限制:只允許 HuggingFace 來源
|
| 49 |
+
if not raw_url.startswith("https://huggingface.co/"):
|
| 50 |
+
return Response("Forbidden", status=403)
|
| 51 |
+
|
| 52 |
+
try:
|
| 53 |
+
r = requests.get(raw_url, timeout=10)
|
| 54 |
+
except Exception as e:
|
| 55 |
+
return Response(f"Upstream error: {e}", status=502)
|
| 56 |
+
|
| 57 |
+
if not r.ok:
|
| 58 |
+
return Response(f"Upstream status {r.status_code}", status=r.status_code)
|
| 59 |
+
|
| 60 |
+
content_type = r.headers.get("Content-Type", "image/jpeg")
|
| 61 |
+
|
| 62 |
+
resp = Response(r.content, status=200, mimetype=content_type)
|
| 63 |
+
|
| 64 |
+
# ⭐ 避免 COEP 再擋圖片
|
| 65 |
+
resp.headers["Cross-Origin-Resource-Policy"] = "cross-origin"
|
| 66 |
+
|
| 67 |
+
return resp
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
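# --- Sketch (not part of this commit): a local sanity check for /proxy-image.
# Assumes the blueprint is mounted at /api and the app listens on port 7860, per the Dockerfile.
def _demo_proxy_image_roundtrip():
    hf_url = "https://huggingface.co/front/assets/huggingface_logo.svg"  # any huggingface.co asset
    r = requests.get("http://localhost:7860/api/proxy-image",
                     params={"url": hf_url}, timeout=15)
    # Expect an image content type plus the CORP header set in the route above.
    print(r.status_code, r.headers.get("Content-Type"),
          r.headers.get("Cross-Origin-Resource-Policy"))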
from flask import request, jsonify
import numpy as np
import nibabel as nib
from scipy.ndimage import distance_transform_edt, label
from collections import defaultdict
from constants import Constants
import os
from openpyxl import load_workbook


SESSIONS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "tmp")
PDF_DIR = f"{Constants.PANTS_PATH}/data/pdf"
os.makedirs(SESSIONS_DIR, exist_ok=True)
os.makedirs(PDF_DIR, exist_ok=True)

def _arg(name: str, default=None):
    return request.args.get(name, default)
@api_blueprint.route('/get_preview/<clabel_ids>', methods=['GET'])
def get_preview(clabel_ids):
    # get age and thumbnail
    clabel_ids = clabel_ids.split(",")
    wb = load_workbook(os.path.join(Constants.PANTS_PATH, "data", "metadata.xlsx"))
    sheet = wb["PanTS_metadata"]
    res = {
        x: {
            "sex": "",
            "age": ""
        } for x in clabel_ids
    }
    for clabel_id in clabel_ids:
        for row in sheet.iter_rows(values_only=True):
            if row[0] == get_panTS_id(clabel_id):
                res[clabel_id]["sex"] = row[4]
                res[clabel_id]["age"] = row[5]
                break

    return jsonify(res)
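# --- Sketch (not part of this commit): get_preview rescans the worksheet once per requested
# id, which is O(rows x ids). A single-pass variant builds a lookup first (same openpyxl API;
# assumes row[0] holds the PanTS id, as in the loop above):
def _metadata_lookup(sheet, clabel_ids):
    rows = {row[0]: row for row in sheet.iter_rows(values_only=True)}
    res = {x: {"sex": "", "age": ""} for x in clabel_ids}
    for clabel_id in clabel_ids:
        row = rows.get(get_panTS_id(clabel_id))
        if row:
            res[clabel_id]["sex"], res[clabel_id]["age"] = row[4], row[5]
    return res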
# if not preloaded
@api_blueprint.route('/get_image_preview/<clabel_id>', methods=['GET'])
def get_image_preview(clabel_id):
    # get age and thumbnail
    # subfolder = "LabelTr" if int(clabel_id) < 9000 else "LabelTe"
    subfolder = "ProfileTr" if int(clabel_id) < 9000 else "ProfileTe"
    # path = os.path.join(Constants.PANTS_PATH, "data", subfolder, get_panTS_id(clabel_id), Constants.COMBINED_LABELS_FILENAME)
    # if not os.path.exists(path):
    #     print(f"File not found: {path}. Making file")
    #     npz_processor = NpzProcessor()
    #     npz_processor.combine_labels(int(clabel_id))

    path = os.path.join(Constants.PANTS_PATH, subfolder, get_panTS_id(clabel_id), "profile.jpg")
    # arr = np.load(path)["data"]
    # bytes = volume_to_png(arr)
    return send_file(
        path,
        mimetype="image/jpeg",  # was "image/jpg"; "image/jpeg" is the registered MIME type
        as_attachment=False,
        download_name=f"{clabel_id}_slice.jpg"
    )
@api_blueprint.route('/get-label-colormap/<clabel_id>', methods=['GET'])
def get_label_colormap(clabel_id):
    subfolder = "LabelTr" if int(clabel_id) < 9000 else "LabelTe"

    clabel_path = os.path.join(Constants.PANTS_PATH, "data", subfolder, get_panTS_id(int(clabel_id)), 'combined_labels.nii.gz')

    if not os.path.exists(clabel_path):
        print(f"File not found: {clabel_path}. Making file")
        combine_label_npz(int(clabel_id))
        npzProcessor = NpzProcessor()
        npzProcessor.npz_to_nifti(int(clabel_id))
    try:
        clabel_array = nib.load(clabel_path)
        clabel_array = clabel_array.get_fdata()
        print("[DEBUG] Nifti loaded, shape =", clabel_array.shape)

        filled_array = fill_voids_with_nearest_label(clabel_array)
        print("[DEBUG] fill_voids_with_nearest_label done")

        adjacency = build_adjacency_graph(filled_array)
        print("[DEBUG] build_adjacency_graph done")

        unique_labels = sorted(adjacency.keys())
        color_map, color_usage_count = assign_colors_with_high_contrast(unique_labels, adjacency)
        print("[DEBUG] Color map generated:", color_map, color_usage_count)

        return jsonify(color_map)

    except Exception as e:
        print("[❌ EXCEPTION]", str(e))
        return jsonify({"error": str(e)}), 500
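# --- Sketch (not part of this commit): a tiny end-to-end check of the colormap pipeline
# above, using the helpers defined in api/utils.py on a synthetic two-label volume:
def _demo_colormap_pipeline():
    vol = np.zeros((4, 4, 4), dtype=np.int32)
    vol[:2] = 1                                   # two touching labels,
    vol[2:] = 2                                   # no background voids to fill
    adjacency = build_adjacency_graph(vol)        # -> {1: {2}, 2: {1}}
    cmap, usage = assign_colors_with_high_contrast(sorted(adjacency.keys()), adjacency)
    assert cmap["1"] != cmap["2"]                 # adjacent labels receive different colors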
# @api_blueprint.before_request
# def before_request():
#     global last_session_check
#     current_time = datetime.now()
#     if current_time >= last_session_check + timedelta(minutes=Constants.SCHEDULED_CHECK_INTERVAL):
#         session_manager = SessionManager.instance()
#         expired = session_manager.get_expired()
#         for app_session in expired:
#             session_manager.terminate_session(app_session.session_id)
#
#         last_session_check = current_time

@api_blueprint.route('/', methods=['GET'])
def home():
    return "api"
@api_blueprint.route('/upload', methods=['POST'])
def upload():
    try:
        session_id = request.form.get('SESSION_ID')
        if not session_id:
            return jsonify({"error": "No session ID provided"}), 400

        base_path = os.path.join(Constants.SESSIONS_DIR_NAME, session_id)
        os.makedirs(base_path, exist_ok=True)

        nifti_multi_dict = request.files
        filenames = list(nifti_multi_dict)
        main_nifti = nifti_multi_dict.get(Constants.MAIN_NIFTI_FORM_NAME)

        if main_nifti:
            main_nifti_path = os.path.join(base_path, Constants.MAIN_NIFTI_FILENAME)
            main_nifti.save(main_nifti_path)
            filenames.remove(Constants.MAIN_NIFTI_FORM_NAME)
        else:
            return jsonify({"error": "Main NIFTI file missing"}), 400

        nifti_processor = NiftiProcessor.from_clabel_path(os.path.join(base_path, Constants.COMBINED_LABELS_FILENAME))

        combined_labels, organ_intensities = nifti_processor.combine_labels(filenames, nifti_multi_dict, save=True)

        resp = {
            'status': "200",
            'session_id': session_id,
            'organ_intensities': organ_intensities
        }
        return jsonify(resp)
    except Exception as e:
        print(f"❌ [Upload Error] {e}")
        return jsonify({"error": "Internal server error"}), 500
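# --- Sketch (not part of this commit): the multipart shape /upload expects. The literal
# "MAIN_NIFTI" field name is an assumption for Constants.MAIN_NIFTI_FORM_NAME (it would
# match the /auto_segment route below); every other file part is treated as an organ mask.
def _demo_upload(ct_path="ct.nii.gz"):
    with open(ct_path, "rb") as f:
        files = {"MAIN_NIFTI": f}                  # plus one part per organ-mask NIfTI
        data = {"SESSION_ID": str(uuid.uuid4())}
        r = requests.post("http://localhost:7860/api/upload", files=files, data=data)
    return r.json()                                # includes session_id and organ_intensities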
@api_blueprint.route('/mask-data', methods=['POST'])
def get_mask_data():
    session_key = request.form.get('sessionKey')
    if not session_key:
        return jsonify({"error": "Missing sessionKey"}), 400

    result = get_mask_data_internal(session_key)
    return jsonify(result)
@api_blueprint.route('/get-main-nifti/<clabel_id>', methods=['GET'])
def get_main_nifti(clabel_id):
    subfolder = "ImageTr" if int(clabel_id) < 9000 else "ImageTe"
    main_nifti_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(clabel_id)}/{Constants.MAIN_NIFTI_FILENAME}"

    if os.path.exists(main_nifti_path):
        response = make_response(send_file(main_nifti_path, mimetype='application/gzip'))

        response.headers['Cross-Origin-Opener-Policy'] = 'same-origin'
        response.headers['Cross-Origin-Embedder-Policy'] = 'require-corp'
        response.headers['Content-Encoding'] = 'gzip'

    else:
        print(f"Could not find filepath: {main_nifti_path}. ")
        return jsonify({"error": "Could not find filepath"}), 404

    # npz_path = main_nifti_path.replace(".nii.gz", ".npz")
    # if not os.path.exists(npz_path):
    #     return jsonify({"error": "Could not find npz filepath"}), 404
    # npz_processor = NpzProcessor()
    # npz_processor.npz_to_nifti(int(clabel_id), combined_label=False, save=True)

    # response = make_response(send_file(main_nifti_path, mimetype='application/gzip'))

    # response.headers['Cross-Origin-Opener-Policy'] = 'same-origin'
    # response.headers['Cross-Origin-Embedder-Policy'] = 'require-corp'
    # response.headers['Content-Encoding'] = 'gzip'

    return response
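# --- Sketch (not part of this commit): the same three headers are set on every gzipped
# NIfTI response (here and in /get-segmentations below). A shared helper would keep them
# in sync across routes:
def _gzip_nifti_response(path):
    resp = make_response(send_file(path, mimetype='application/gzip'))
    resp.headers['Cross-Origin-Opener-Policy'] = 'same-origin'    # COOP/COEP pair the viewer needs
    resp.headers['Cross-Origin-Embedder-Policy'] = 'require-corp'
    resp.headers['Content-Encoding'] = 'gzip'                     # browser inflates the .nii.gz itself
    return resp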
@api_blueprint.route('/get-report/<id>', methods=['GET'])
def get_report(id):
    temp_pdf_path = f"{PDF_DIR}/temp.pdf"
    output_pdf_path = f"{PDF_DIR}/final.pdf"
    try:
        try:
            organ_metrics = get_mask_data_internal(id)
            organ_metrics = organ_metrics.get("organ_metrics", [])
        except Exception as e:
            return jsonify({"error": f"Error loading organ metrics: {str(e)}"}), 500

        subfolder = "ImageTr" if int(id) < 9000 else "ImageTe"
        label_subfolder = "LabelTr" if int(id) < 9000 else "LabelTe"

        base_path = f"{SESSIONS_DIR}/{id}"
        ct_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(id)}/{Constants.MAIN_NIFTI_FILENAME}"
        masks = f"{Constants.PANTS_PATH}/data/{label_subfolder}/{get_panTS_id(id)}/{Constants.COMBINED_LABELS_NIFTI_FILENAME}"

        npz_processor = NpzProcessor()

        # if (not os.path.exists(ct_path)):
        #     npz_processor.npz_to_nifti(int(id), combined_label=False, save=True)

        if (not os.path.exists(masks)):
            npz_processor.combine_labels(int(id), keywords={"pancrea": "pancreas"}, save=True)
            npz_processor.npz_to_nifti(int(id), combined_label=True, save=True)

        template_pdf = os.getenv("TEMPLATE_PATH", "report_template_3.pdf")

        extracted_data = None
        column_headers = None
        try:
            csv_path = f"{base_path}/info.csv"
            df = pd.read_csv(csv_path)
            extracted_data = df.iloc[0] if len(df) > 0 else None
            column_headers = df.columns.tolist()
        except Exception:
            pass

        generate_pdf_with_template(
            output_pdf=output_pdf_path,
            folder_name=id,
            ct_path=ct_path,
            mask_path=masks,
            template_pdf=template_pdf,
            temp_pdf_path=temp_pdf_path,
            id=id,
            extracted_data=extracted_data,
            column_headers=column_headers
        )

        return send_file(
            output_pdf_path,
            mimetype="application/pdf",
            as_attachment=True,
            download_name=f"report_{id}.pdf"
        )

    except Exception as e:
        return jsonify({"error": f"Unhandled error: {str(e)}"}), 500

    finally:
        if os.path.exists(temp_pdf_path):
            os.remove(temp_pdf_path)
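# --- Sketch (not part of this commit): temp.pdf / final.pdf are fixed paths shared by all
# requests, so two overlapping /get-report calls can overwrite each other's output. A
# per-request naming scheme (the uuid4 tag is an assumption) avoids that race:
def _report_paths():
    tag = uuid.uuid4().hex
    return f"{PDF_DIR}/temp_{tag}.pdf", f"{PDF_DIR}/final_{tag}.pdf"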
@api_blueprint.route('/get-segmentations/<combined_labels_id>', methods=['GET'])
async def get_segmentations(combined_labels_id):
    subfolder = "LabelTr" if int(combined_labels_id) < 9000 else "LabelTe"
    nifti_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(combined_labels_id)}/{Constants.COMBINED_LABELS_NIFTI_FILENAME}"
    labels = list(Constants.PREDEFINED_LABELS.values())
    if not os.path.exists(nifti_path):
        await store_files(combined_labels_id)
        niftiProcessor = NpzProcessor()
        niftiProcessor.nifti_combine_labels(int(combined_labels_id))
        # print(f"Could not find filepath: {nifti_path}. Creating a new one")
        # npz_path = nifti_path.replace(".nii.gz", ".npz")
        # npz_processor = NpzProcessor()
        # if not os.path.exists(npz_path):
        #     print(f"Could not find npz filepath: {npz_path}. Creating a new one")

        #     # ! pancrea instead of pancreas to include pancreatic labels
        #     npz_processor.combine_labels(combined_labels_id, keywords={"pancrea": "pancreas"}, save=True)

        #     npz_processor.npz_to_nifti(int(combined_labels_id), combined_label=True, save=True)

    img = nib.load(nifti_path)
    data = img.get_fdata()
    if img.get_data_dtype() != np.uint8:
        print("⚠️ Detected float label map, converting to uint8 for Niivue compatibility...")

    try:
        img = nib.load(nifti_path)
        data = img.get_fdata()

        if img.get_data_dtype() != np.uint8:

            data_uint8 = data.astype(np.uint8)
            new_img = nib.Nifti1Image(data_uint8, img.affine, header=img.header)
            new_img.set_data_dtype(np.uint8)

            converted_path = nifti_path  # .replace(".nii.gz", "_uint8.nii.gz")

            if not os.path.exists(converted_path):
                nib.save(new_img, converted_path)
        else:
            converted_path = nifti_path

        response = make_response(send_file(converted_path, mimetype='application/gzip'))
        response.headers['Cross-Origin-Opener-Policy'] = 'same-origin'
        response.headers['Cross-Origin-Embedder-Policy'] = 'require-corp'
        response.headers['Content-Encoding'] = 'gzip'

        return response

    except Exception as e:
        print(f"❌ [get-segmentations ERROR] {e}")
        return jsonify({"error": str(e)}), 500
@api_blueprint.route('/download/<id>', methods=['GET'])
def download_segmentation_zip(id):
    try:
        subfolder = "LabelTr" if int(id) < 9000 else "LabelTe"
        outputs_ct_folder = Path(f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(id)}/segmentations")

        if not os.path.exists(outputs_ct_folder):
            return jsonify({"error": "Outputs/ct folder not found"}), 404

        files = list(outputs_ct_folder.glob("*"))

        zip_buffer = BytesIO()
        with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
            for file_path in files:
                zip_file.write(file_path, arcname=file_path.name)

        zip_buffer.seek(0)  # rewind

        return send_file(
            zip_buffer,
            mimetype="application/zip",
            as_attachment=True,
            download_name=f"case_{id}_segmentations.zip"
        )

    except Exception as e:
        print(f"❌ [Download Error] {e}")
        return jsonify({"error": "Internal server error"}), 500

import threading
import time
@api_blueprint.route('/auto_segment/<session_id>', methods=['POST'])
def auto_segment(session_id):

    if 'MAIN_NIFTI' not in request.files:
        return jsonify({"error": "No CT file provided"}), 400

    ct_file = request.files['MAIN_NIFTI']
    model_name = request.form.get("MODEL_NAME", None)

    # Check if model name is valid
    if model_name is None:
        return {"error": "MODEL_NAME is required."}, 400
    # Step 1: Create a unique session directory to store CT and mask
    session_path = os.path.join(SESSIONS_DIR, session_id)
    os.makedirs(session_path, exist_ok=True)

    # Step 2: Save CT file under this session
    input_path = os.path.join(session_path, ct_file.filename)
    ct_file.save(input_path)

    def do_segmentation_and_zip():
        time.sleep(10)
        output_mask_dir = run_auto_segmentation(input_path, session_dir=session_path, model=model_name)

        if output_mask_dir is None or not os.path.exists(output_mask_dir):
            print(f"❌ Auto segmentation failed for session {session_id}")
            return  # TODO: when output_mask_dir is None, the user currently gets no error; this logic still needs improvement

        zip_path = os.path.join(session_path, "auto_masks.zip")
        with zipfile.ZipFile(zip_path, 'w') as zipf:
            for filename in os.listdir(output_mask_dir):
                if filename.endswith(".nii.gz"):
                    abs_path = os.path.join(output_mask_dir, filename)
                    zipf.write(abs_path, arcname=filename)

        start_time, expected_time, _ = progress_tracker[session_id]
        progress_tracker[session_id] = (start_time, expected_time, True)
        progress_tracker.pop(session_id, None)

        print(f"✅ Finished segmentation and zipping for session {session_id}")

    threading.Thread(target=do_segmentation_and_zip).start()
    print("[Server] auto_segment request is returning now")
    return jsonify({"message": "Segmentation started"}), 200
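# --- Sketch (not part of this commit): do_segmentation_and_zip unpacks a 3-tuple from
# progress_tracker, but nothing in this file ever inserts one, so that line raises KeyError
# unless the tracker is seeded first. One way to seed it at request time — the tuple shape
# and the 300-second estimate are assumptions:
def _seed_progress(session_id, expected_total_seconds=300):
    progress_tracker[session_id] = (time.time(), expected_total_seconds, False)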
@api_blueprint.route('/get_result/<session_id>', methods=['GET'])
def get_result(session_id):
    session_path = os.path.join(SESSIONS_DIR, session_id)
    zip_path = os.path.join(session_path, "auto_masks.zip")

    wait_for_file(zip_path, timeout=30)

    response = send_file(
        zip_path,
        as_attachment=True,
        download_name="auto_masks.zip"
    )
    response.headers["X-Session-Id"] = session_id
    return response

# @api_blueprint.route('/progress_end/<session_id>', methods=['GET'])
# def progress_end(session_id):
#     progress_tracker.pop(session_id, None)
#     return jsonify({"message": "Progress End"}), 200

@api_blueprint.route('/ping', methods=['GET'])
def ping():
    return jsonify({"message": "pong"}), 200
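# --- Sketch (not part of this commit): the intended client flow is POST /auto_segment, then
# GET /get_result, which blocks for up to 30 s via wait_for_file (and surfaces a TimeoutError
# as a 500 if the zip is not ready). Host/port and the model name here are placeholders:
def _demo_auto_segment_flow(session_id, ct_path="ct.nii.gz"):
    base = "http://localhost:7860/api"
    with open(ct_path, "rb") as f:
        requests.post(f"{base}/auto_segment/{session_id}",
                      files={"MAIN_NIFTI": f}, data={"MODEL_NAME": "default"})
    r = requests.get(f"{base}/get_result/{session_id}", timeout=60)
    with open("auto_masks.zip", "wb") as out:
        out.write(r.content)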
@api_blueprint.route("/search", methods=["GET"])
|
| 495 |
+
def api_search():
|
| 496 |
+
# return jsonify({"message": "pong"}), 200
|
| 497 |
+
df = apply_filters(DF).copy()
|
| 498 |
+
sort_by = (_arg("sort_by", "top") or "top").strip().lower()
|
| 499 |
+
sort_by = (_arg("sort_by", "top") or "top").strip().lower()
|
| 500 |
+
df = ensure_sort_cols(df)
|
| 501 |
+
|
| 502 |
+
# ---- 排序參數 ----
|
| 503 |
+
sort_by = (_arg("sort_by", "top") or "top").strip().lower()
|
| 504 |
+
sort_dir = (_arg("sort_dir", "asc") or "asc").strip().lower()
|
| 505 |
+
|
| 506 |
+
if sort_by in ("top", "quality"):
|
| 507 |
+
by = ["__complete", "__spacing_sum", "__shape_sum", "__case_sortkey"]
|
| 508 |
+
asc = [False, True, False, True]
|
| 509 |
+
elif sort_by in ("id", "id_asc"):
|
| 510 |
+
by, asc = ["__case_sortkey"], [True]
|
| 511 |
+
elif sort_by == "id_desc":
|
| 512 |
+
by, asc = ["__case_sortkey"], [False]
|
| 513 |
+
elif sort_by in ("shape_desc", "shape"):
|
| 514 |
+
by, asc = ["__shape_sum", "__case_sortkey"], [False, True]
|
| 515 |
+
elif sort_by in ("spacing_asc", "spacing"):
|
| 516 |
+
by, asc = ["__spacing_sum", "__case_sortkey"], [True, True]
|
| 517 |
+
elif sort_by == "age_asc":
|
| 518 |
+
by, asc = ["__age", "__case_sortkey"], [True, True]
|
| 519 |
+
elif sort_by == "age_desc":
|
| 520 |
+
by, asc = ["__age", "__case_sortkey"], [False, True]
|
| 521 |
+
else:
|
| 522 |
+
key_map = {"id": "__case_sortkey", "spacing": "__spacing_sum", "shape": "__shape_sum"}
|
| 523 |
+
k = key_map.get(sort_by, "__case_sortkey")
|
| 524 |
+
by, asc = [k, "__case_sortkey"], [(sort_dir != "desc"), True]
|
| 525 |
+
|
| 526 |
+
# ---- 排序 ----
|
| 527 |
+
df = df.sort_values(by=by, ascending=asc, na_position="last", kind="mergesort")
|
| 528 |
+
|
| 529 |
+
# ---- 分頁:注意 total 先算完篩選後的完整筆數 ----
|
| 530 |
+
total = int(len(df))
|
| 531 |
+
page = max(to_int(_arg("page", "1")) or 1, 1)
|
| 532 |
+
per_page = to_int(_arg("per_page", "24")) or 24
|
| 533 |
+
per_page = max(1, min(per_page, 1_000_000))
|
| 534 |
+
|
| 535 |
+
pages = max(1, int(math.ceil(total / per_page)))
|
| 536 |
+
page = max(1, min(page, pages))
|
| 537 |
+
start, end = (page - 1) * per_page, (page - 1) * per_page + per_page
|
| 538 |
+
|
| 539 |
+
# ---- 轉成前端想要的 items ----
|
| 540 |
+
items = [row_to_item(r) for _, r in df.iloc[start:end].iterrows()]
|
| 541 |
+
items = clean_json_list(items)
|
| 542 |
+
|
| 543 |
+
return jsonify({
|
| 544 |
+
"items": items, # ← 前端只讀這個渲染卡片
|
| 545 |
+
"total": total, # ← 正確的最終數量
|
| 546 |
+
"page": page,
|
| 547 |
+
"per_page": per_page,
|
| 548 |
+
"query": request.query_string.decode(errors="ignore") or ""
|
| 549 |
+
})
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
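# --- Worked example (not part of this commit) of the pagination arithmetic above. With
# total=57 filtered rows and per_page=24, pages = ceil(57/24) = 3; requesting page=5 clamps
# to 3, so iloc[48:72] safely yields the final 9 rows. (math resolves via the wildcard
# import from .utils above.)
def _demo_page_bounds(total=57, per_page=24, page=5):
    pages = max(1, int(math.ceil(total / per_page)))                  # 3
    page = max(1, min(page, pages))                                   # 5 -> 3
    return (page - 1) * per_page, (page - 1) * per_page + per_page    # (48, 72)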
def _facet_counts_with_unknown(df: pd.DataFrame, col_key: str, top_k: int = 6) -> Dict[str, Any]:
    """Compute facet rows + unknown count, with robust handling for NaN/strings."""
    rows: List[Dict[str, Any]] = []
    unknown: int = 0

    key_to_col = {
        "ct_phase": ("__ct", str),
        "manufacturer": ("__mfr", str),
        "year": ("__year_int", int),
        "sex": ("__sex", str),
        "tumor": ("__tumor01", int),
        "model": ("model", str),
        "study_type": ("study_type", str),
        "site_nat": ("site_nationality", str),
        "site_nationality": ("site_nationality", str),
    }
    if col_key not in key_to_col:
        return {"rows": [], "unknown": 0}

    col_name, _typ = key_to_col[col_key]
    if col_name not in df.columns:
        return {"rows": [], "unknown": 0}

    ser = df[col_name]

    # ---- Year: coerce to numeric; NaN counts as unknown ----
    if col_key == "year":
        s_num = pd.to_numeric(ser, errors="coerce")
        unknown = int(s_num.isna().sum())
        vc = s_num.dropna().astype(int).value_counts()
        rows = [{"value": int(v), "count": int(c)} for v, c in vc.items()]
        rows.sort(key=lambda x: (-x["count"], x["value"]))
        if top_k and top_k > 0:
            rows = rows[:top_k]
        return {"rows": rows, "unknown": unknown}

    # ---- Other columns: fold empty strings / unknown-like values into unknown ----
    s_str = ser.astype(str).str.strip()
    s_lc = s_str.str.lower()
    unknown_mask = ser.isna() | (s_str == "") | (s_lc.isin({"unknown", "nan", "none", "n/a", "na"}))
    unknown = int(unknown_mask.sum())

    vals = ser[~unknown_mask]
    vc = vals.value_counts(dropna=False)

    tmp_rows: List[Dict[str, Any]] = []
    for v, c in vc.items():
        if col_key == "tumor":
            # tumor only accepts 0/1
            try:
                iv = int(v)
            except Exception:
                continue
            if iv not in (0, 1):
                continue
            tmp_rows.append({"value": iv, "count": int(c)})
        else:
            tmp_rows.append({"value": v, "count": int(c)})

    # Sort: count desc, then value asc (string comparison avoids type mismatches)
    tmp_rows.sort(key=lambda x: (-x["count"], str(x["value"])))
    if top_k and top_k > 0:
        tmp_rows = tmp_rows[:top_k]

    rows = tmp_rows
    return {"rows": rows, "unknown": unknown}


def _prune_zero_rows(rows: List[Dict[str, Any]], keep_zero: bool) -> List[Dict[str, Any]]:
    """Drop rows with count <= 0; when keep_zero=True (i.e. guarantee=1), keep them all."""
    if keep_zero:
        return rows
    out: List[Dict[str, Any]] = []
    for r in rows or []:
        try:
            c = int(r.get("count") or 0)
        except Exception:
            c = 0
        if c > 0:
            out.append(r)
    return out
@api_blueprint.route("/facets", methods=["GET"])
|
| 636 |
+
def api_facets():
|
| 637 |
+
try:
|
| 638 |
+
fields_raw = (_arg("fields","ct_phase,manufacturer") or "").strip()
|
| 639 |
+
fields = [f.strip().lower() for f in fields_raw.split(",") if f.strip()]
|
| 640 |
+
|
| 641 |
+
valid = {
|
| 642 |
+
"ct_phase","manufacturer","year","sex","tumor",
|
| 643 |
+
"model","study_type","site_nat","site_nationality"
|
| 644 |
+
}
|
| 645 |
+
fields = [f for f in fields if f in valid] or ["ct_phase","manufacturer"]
|
| 646 |
+
top_k = to_int(_arg("top_k","6")) or 6
|
| 647 |
+
guarantee = (_arg("guarantee","0") or "0").strip().lower() in ("1","true","yes","y")
|
| 648 |
+
|
| 649 |
+
# 先應用目前的過濾條件
|
| 650 |
+
df_now = apply_filters(DF)
|
| 651 |
+
base_for_ranges = df_now if len(df_now) else DF
|
| 652 |
+
|
| 653 |
+
facets: Dict[str, List[Dict[str, Any]]] = {}
|
| 654 |
+
unknown_counts: Dict[str, int] = {}
|
| 655 |
+
|
| 656 |
+
# 為每個 facet 準備自我排除的條件(避免自我影響)
|
| 657 |
+
exclude_map = {
|
| 658 |
+
"ct_phase": {"ct_phase"},
|
| 659 |
+
"manufacturer": {"manufacturer","mfr_is_null","manufacturer_is_null"},
|
| 660 |
+
"year": {"year_from","year_to"},
|
| 661 |
+
"sex": {"sex"},
|
| 662 |
+
"tumor": {"tumor"},
|
| 663 |
+
"model": {"model"},
|
| 664 |
+
"study_type": {"study_type"},
|
| 665 |
+
"site_nat": {"site_nat","site_nationality"},
|
| 666 |
+
"site_nationality": {"site_nat","site_nationality"},
|
| 667 |
+
}
|
| 668 |
+
|
| 669 |
+
for f in fields:
|
| 670 |
+
ex = exclude_map.get(f, set())
|
| 671 |
+
# 若 guarantee=1 且目前篩完為空,改用全量 DF 以「保證列出所有可能值」
|
| 672 |
+
src = (DF if (guarantee and len(df_now) == 0) else df_now)
|
| 673 |
+
df_facet = apply_filters(src, exclude=ex)
|
| 674 |
+
res = _facet_counts_with_unknown(df_facet, f, top_k=top_k)
|
| 675 |
+
|
| 676 |
+
# guarantee=0 時砍掉 count<=0 的項目
|
| 677 |
+
rows = _prune_zero_rows(res.get("rows") or [], keep_zero=guarantee)
|
| 678 |
+
facets[f] = rows
|
| 679 |
+
unknown_counts[f] = int(res.get("unknown") or 0)
|
| 680 |
+
|
| 681 |
+
# 年齡/年份範圍(原樣保留)
|
| 682 |
+
def _minmax(series: pd.Series):
|
| 683 |
+
s = series.dropna()
|
| 684 |
+
if not len(s): return (None, None)
|
| 685 |
+
return (float(s.min()), float(s.max()))
|
| 686 |
+
|
| 687 |
+
age_min = age_max = None
|
| 688 |
+
year_min = year_max = None
|
| 689 |
+
if "__age" in base_for_ranges:
|
| 690 |
+
age_min, age_max = _minmax(base_for_ranges["__age"])
|
| 691 |
+
if "__year_int" in base_for_ranges:
|
| 692 |
+
yr = base_for_ranges["__year_int"].dropna().astype(int)
|
| 693 |
+
if len(yr):
|
| 694 |
+
year_min, year_max = int(yr.min()), int(yr.max())
|
| 695 |
+
|
| 696 |
+
return jsonify({
|
| 697 |
+
"facets": facets,
|
| 698 |
+
"unknown_counts": unknown_counts,
|
| 699 |
+
"age_range": {"min": age_min, "max": age_max},
|
| 700 |
+
"year_range": {"min": year_min, "max": year_max},
|
| 701 |
+
"total": int(len(df_now)),
|
| 702 |
+
})
|
| 703 |
+
except Exception as e:
|
| 704 |
+
return jsonify({"error": str(e)}), 400
|
| 705 |
+
|
| 706 |
+
@api_blueprint.route("/random", methods=['GET'])
|
| 707 |
+
def api_random_topk_rotate_norand():
|
| 708 |
+
"""
|
| 709 |
+
推薦:完整資料優先 → 取 Top-K(預設100) → 環狀位移 → 可排除最近看過
|
| 710 |
+
排序:__spacing_sum ↑, __shape_sum ↓, __case_sortkey ↑
|
| 711 |
+
"""
|
| 712 |
+
try:
|
| 713 |
+
scope = (request.args.get("scope", "filtered") or "filtered").strip().lower()
|
| 714 |
+
base_df = apply_filters(DF)
|
| 715 |
+
if len(base_df) == 0 and scope == "all":
|
| 716 |
+
base_df = DF.copy()
|
| 717 |
+
|
| 718 |
+
base_df = ensure_sort_cols(base_df)
|
| 719 |
+
|
| 720 |
+
# 只取完整資料;若沒有完整的就退回全部
|
| 721 |
+
df_full = base_df[base_df["__complete"]] if "__complete" in base_df.columns else base_df
|
| 722 |
+
if len(df_full) == 0:
|
| 723 |
+
df_full = base_df
|
| 724 |
+
df = df_full.sort_values(
|
| 725 |
+
by=["__spacing_sum","__shape_sum","__case_sortkey"],
|
| 726 |
+
ascending=[True, False, True],
|
| 727 |
+
na_position="last",
|
| 728 |
+
kind="mergesort",
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
if len(df) == 0:
|
| 732 |
+
return jsonify({"items": [], "total": 0, "meta": {"k": 0, "used_recent": 0}}), 200
|
| 733 |
+
|
| 734 |
+
# n, k
|
| 735 |
+
try: n = int(request.args.get("n") or 3)
|
| 736 |
+
except Exception: n = 3
|
| 737 |
+
n = max(1, min(n, len(df)))
|
| 738 |
+
|
| 739 |
+
try: K = int(request.args.get("k") or 100)
|
| 740 |
+
except Exception: K = 100
|
| 741 |
+
K = max(n, min(K, len(df)))
|
| 742 |
+
|
| 743 |
+
# recent 排除
|
| 744 |
+
recent_raw = (request.args.get("recent") or "").strip()
|
| 745 |
+
used_recent = 0
|
| 746 |
+
if recent_raw:
|
| 747 |
+
recent_ids = {s.strip() for s in recent_raw.split(",") if s.strip()}
|
| 748 |
+
key = df["__case_str"].astype(str) if "__case_str" in df.columns else None
|
| 749 |
+
if key is not None:
|
| 750 |
+
mask = ~key.isin(recent_ids)
|
| 751 |
+
used_recent = int((~mask).sum())
|
| 752 |
+
df2 = df[mask]
|
| 753 |
+
if len(df2): df = df2
|
| 754 |
+
|
| 755 |
+
topk = df.iloc[:K]
|
| 756 |
+
if len(topk) == 0:
|
| 757 |
+
return jsonify({"items": [], "total": 0, "meta": {"k": 0, "used_recent": used_recent}}), 200
|
| 758 |
+
|
| 759 |
+
off_arg = request.args.get("offset")
|
| 760 |
+
if off_arg is not None:
|
| 761 |
+
try: offset = int(off_arg) % len(topk)
|
| 762 |
+
except Exception: offset = 0
|
| 763 |
+
else:
|
| 764 |
+
now = datetime.utcnow()
|
| 765 |
+
offset = ((now.minute * 60) + now.second) % len(topk)
|
| 766 |
+
|
| 767 |
+
idx = list(range(len(topk))) + list(range(len(topk)))
|
| 768 |
+
pick = idx[offset:offset + min(n, len(topk))]
|
| 769 |
+
sub = topk.iloc[pick]
|
| 770 |
+
|
| 771 |
+
items = [row_to_item(r) for _, r in sub.iterrows()]
|
| 772 |
+
resp = jsonify({
|
| 773 |
+
"items": clean_json_list(items),
|
| 774 |
+
"total": int(len(df)),
|
| 775 |
+
"meta": {"k": int(len(topk)), "used_recent": used_recent, "offset": int(offset)}
|
| 776 |
+
})
|
| 777 |
+
r = make_response(resp)
|
| 778 |
+
r.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, max-age=0"
|
| 779 |
+
r.headers["Pragma"] = "no-cache"
|
| 780 |
+
r.headers["Expires"] = "0"
|
| 781 |
+
return r
|
| 782 |
+
|
| 783 |
+
except Exception as e:
|
| 784 |
+
return jsonify({"error": str(e)}), 400
|
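A worked example (not from the commit) of the clock-driven rotation in /random: with k=100 eligible cases and n=3, a request at 12:34:58 gives offset = (34·60 + 58) % 100 = 98, and the doubled index list lets the slice [98:101] wrap around to rows 98, 99, and 0; a minute later the window has shifted by 60 positions, so repeat visitors see the top pool rotate continuously without any randomness.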
api/utils.py
ADDED
@@ -0,0 +1,1328 @@
from flask import Blueprint, send_file, make_response, request, jsonify
from services.nifti_processor import NiftiProcessor
from services.session_manager import SessionManager, generate_uuid
from services.auto_segmentor import run_auto_segmentation
from models.application_session import ApplicationSession
from models.combined_labels import CombinedLabels
from models.base import db
from constants import Constants

from io import BytesIO
from datetime import datetime
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter

from typing import Any, Dict, Optional, Set, List, Tuple

import os
import uuid
import re
import time
import math
import json  # needed by get_mask_data_internal below; missing from the original imports
import numpy as np
import nibabel as nib
from scipy.ndimage import distance_transform_edt
from collections import defaultdict
from services.npz_processor import NpzProcessor
from PIL import Image
from openpyxl import load_workbook
import requests
import pandas as pd

# Track last session validation time
last_session_check = datetime.now()

# Progress tracking structure: {session_id: (start_time, expected_total_seconds)}
progress_tracker = {}

def id_is_training(index):
    return index < 9000
def combine_label_npz(index: int):
    npz_processor = NpzProcessor()
    npz_processor.combine_labels(index)
    return

def get_panTS_id(index):
    cur_case_id = str(index)
    iter = max(0, 8 - len(str(index)))
    for _ in range(iter):
        cur_case_id = "0" + cur_case_id
    cur_case_id = "PanTS_" + cur_case_id
    return cur_case_id
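# --- Note (not part of this commit): the manual zero-padding loop above is equivalent to a
# format spec, which also leaves indexes longer than 8 digits untouched in the same way:
def _pants_id(index) -> str:
    return f"PanTS_{int(index):08d}"    # e.g. 42 -> "PanTS_00000042"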
def clean_nan(obj):
    """Recursively replace NaN with None for JSON serialization."""
    if isinstance(obj, dict):
        return {k: clean_nan(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [clean_nan(elem) for elem in obj]
    elif isinstance(obj, float) and math.isnan(obj):
        return None
    else:
        return obj

def format_value(value):
    """Format values for display, replacing 999999 or None with 'N/A'."""
    return "N/A" if value in [999999, None] else str(value)

def organname_to_name(filename):
    """Convert a NIfTI file name to a human-readable organ name."""
    name = filename.replace(".nii.gz", "").replace("_", " ")
    return name.title()

def get_mask_data_internal(id, fallback=False):
    """Retrieve or compute organ metadata from NIfTI and mask paths for a session."""
    try:
        subfolder = "ImageTr" if int(id) < 9000 else "ImageTe"
        label_subfolder = "LabelTr" if int(id) < 9000 else "LabelTe"
        main_nifti_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(id)}/{Constants.MAIN_NIFTI_FILENAME}"
        combined_labels_path = f"{Constants.PANTS_PATH}/data/{label_subfolder}/{get_panTS_id(id)}/{Constants.COMBINED_LABELS_NIFTI_FILENAME}"
        print(f"[INFO] Processing NIFTI for id {id}")
        organ_intensities = None

        organ_intensities_path = f"{Constants.PANTS_PATH}/data/{label_subfolder}/{get_panTS_id(id)}/{Constants.ORGAN_INTENSITIES_FILENAME}"
        if not os.path.exists(organ_intensities_path) or not os.path.exists(combined_labels_path):
            npz_processor = NpzProcessor()
            labels, organ_intensities = npz_processor.combine_labels(int(id), keywords={"pancrea": "pancreas"}, save=True)
        else:
            with open(organ_intensities_path, "r") as f:
                organ_intensities = json.load(f)

        nifti_processor = NiftiProcessor(main_nifti_path, combined_labels_path)
        nifti_processor.set_organ_intensities(organ_intensities)
        organ_metadata = nifti_processor.calculate_metrics()
        organ_metadata = clean_nan(organ_metadata)

        return organ_metadata

    except Exception as e:
        print(f"[ERROR] get_mask_data_internal: {e}")
        return {"error": str(e)}
def generate_distinct_colors(n):
    """Generate n visually distinct RGB colors."""
    import colorsys
    HSV_tuples = [(x / n, 0.7, 0.9) for x in range(n)]
    RGB_tuples = [tuple(int(c * 255) for c in colorsys.hsv_to_rgb(*hsv)) for hsv in HSV_tuples]
    return RGB_tuples

def fill_voids_with_nearest_label(label_array):
    """Fill all 0-valued voxels with the nearest non-zero label."""
    mask = label_array == 0
    if not np.any(mask):
        return label_array

    nonzero_coords = np.array(np.nonzero(label_array)).T
    distances, indices = distance_transform_edt(mask, return_indices=True)
    filled_array = label_array.copy()
    filled_array[mask] = label_array[tuple(indices[:, mask])]
    return filled_array
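# --- Worked example (not part of this commit) of the EDT fill above: for the row
# [1, 0, 0, 2], distance_transform_edt on the zero-mask returns, for each void voxel, the
# coordinates of its nearest non-void neighbor, so the voids fill to [1, 1, 2, 2].
# (Note: nonzero_coords computed above is never used and could be dropped.)
def _demo_fill():
    row = np.array([[[1, 0, 0, 2]]])    # 3-D so the 6-neighbor helpers below also apply
    assert fill_voids_with_nearest_label(row).tolist() == [[[1, 1, 2, 2]]]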
def build_adjacency_graph(label_array):
|
| 123 |
+
"""Build adjacency graph of label connectivity in 6 directions."""
|
| 124 |
+
adjacency = defaultdict(set)
|
| 125 |
+
offsets = [(-1, 0, 0), (1, 0, 0),
|
| 126 |
+
(0, -1, 0), (0, 1, 0),
|
| 127 |
+
(0, 0, -1), (0, 0, 1)]
|
| 128 |
+
|
| 129 |
+
for dx, dy, dz in offsets:
|
| 130 |
+
shifted = np.roll(label_array, shift=(dx, dy, dz), axis=(0, 1, 2))
|
| 131 |
+
mask = (label_array != shifted) & (label_array != 0) & (shifted != 0)
|
| 132 |
+
l1 = label_array[mask]
|
| 133 |
+
l2 = shifted[mask]
|
| 134 |
+
for a, b in zip(l1, l2):
|
| 135 |
+
if a != b:
|
| 136 |
+
adjacency[a].add(b)
|
| 137 |
+
adjacency[b].add(a)
|
| 138 |
+
return adjacency
|
| 139 |
+
|
| 140 |
+
def assign_colors_with_high_contrast(label_ids, adjacency_graph, min_initial_colors=20, max_total_colors=50):
    """
    Assign colors to labels such that adjacent labels have different colors,
    maximizing contrast and balance.
    """
    import colorsys

    def generate_distinct_colors(n):
        HSV_tuples = [(x / n, 0.7, 0.9) for x in range(n)]
        RGB_tuples = [tuple(int(c * 255) for c in colorsys.hsv_to_rgb(*hsv)) for hsv in HSV_tuples]
        return RGB_tuples

    def can_use_color(label, color_idx, assignments, adjacency_graph):
        for neighbor in adjacency_graph[label]:
            if assignments.get(neighbor) == color_idx:
                return False
        return True

    label_ids = sorted(label_ids)
    assignments = {}
    num_colors = min_initial_colors
    color_usage_count = {i: 0 for i in range(num_colors)}

    while True:
        assignments.clear()
        color_usage_count = {i: 0 for i in range(num_colors)}
        success = True

        for label in label_ids:
            color_order = sorted(range(num_colors), key=lambda c: (color_usage_count[c], c))
            for color_idx in color_order:
                if can_use_color(label, color_idx, assignments, adjacency_graph):
                    assignments[label] = color_idx
                    color_usage_count[color_idx] += 1
                    break
            else:
                success = False
                break

        if success:
            break
        elif num_colors >= max_total_colors:
            print(f"⚠️ Warning: reached max color count {max_total_colors}; some neighbors may share a color")
            break
        else:
            num_colors += 1

    final_colors = generate_distinct_colors(num_colors)
    print(f"✅ Final color count used: {len(set(assignments.values()))}")

    color_map = {
        str(round(label)): {
            "R": final_colors[color_idx][0],
            "G": final_colors[color_idx][1],
            "B": final_colors[color_idx][2],
            "A": 128
        }
        for label, color_idx in assignments.items()
    }

    return color_map, color_usage_count

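# Usage sketch (hypothetical adjacency graph): the greedy pass prefers the
# least-used color and grows the palette until all neighbors differ:
#
#   graph = defaultdict(set, {1: {2}, 2: {1, 3}, 3: {2}})
#   color_map, usage = assign_colors_with_high_contrast([1, 2, 3], graph)
#   # color_map["1"] != color_map["2"]; each value is an R/G/B/A dict with A=128
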
def wait_for_file(filepath, timeout=30, check_interval=0.5):
    """Wait until a file exists, or timeout is reached."""
    start_time = time.time()
    while not os.path.exists(filepath):
        if time.time() - start_time > timeout:
            raise TimeoutError(f"Timeout: File {filepath} not found after {timeout} seconds.")
        time.sleep(check_interval)

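# Sketch (hypothetical path): block until another process finishes writing
# an artifact, e.g.
#
#   wait_for_file("/tmp/combined_labels.nii.gz", timeout=60)
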
def volume_to_png(volume, axis=2, index=None):
    if index is None:
        index = volume.shape[axis] // 2

    slice_ = np.take(volume, index, axis=axis)
    # window_center = 40
    # window_width = 400
    # min_val = window_center - window_width / 2
    # max_val = window_center + window_width / 2

    # slice_clipped = np.clip(slice_, min_val, max_val)
    # slice_norm = 255 * (slice_clipped - min_val) / (max_val - min_val)
    value_range = np.max(slice_) - np.min(slice_)
    if value_range == 0:
        value_range = 1  # avoid division by zero on constant slices
    slice_norm = 255 * (slice_ - np.min(slice_)) / value_range
    slice_norm = slice_norm.astype(np.uint8)

    slice_norm = np.rot90(slice_norm, k=1)
    slice_norm = np.flip(slice_norm, axis=0)

    pil_img = Image.fromarray(slice_norm)
    buf = BytesIO()
    pil_img.save(buf, format="PNG")
    buf.seek(0)
    return buf

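# Sketch (hypothetical volume): grab the middle axial slice as a PNG buffer:
#
#   vol = np.random.rand(64, 64, 32)
#   buf = volume_to_png(vol)        # axis=2, middle index by default
#   png_bytes = buf.getvalue()
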
def generate_pdf_with_template(
    output_pdf,
    folder_name,
    ct_path,
    mask_path,
    template_pdf,
    temp_pdf_path,
    id,
    extracted_data=None,
    column_headers=None,
):
    import os
    import nibabel as nib
    import numpy as np
    import pandas as pd
    from PyPDF2 import PdfReader, PdfWriter
    from PyPDF2._page import PageObject
    from reportlab.pdfgen import canvas
    from reportlab.lib.pagesizes import letter

    LABELS = {v: k for k, v in Constants.PREDEFINED_LABELS.items()}
    NAME_TO_ORGAN = {
        # Pancreas and its lesions
        "pancreas": "pancreas",
        "pancreas_body": "pancreas",
        "pancreas_head": "pancreas",
        "pancreas_tail": "pancreas",
        "pancreatic_lesion": "pancreas",
        "pancreatic_duct": "pancreas",

        # All other organs: map to self
        "aorta": "aorta",
        "adrenal_gland_left": "adrenal_gland_left",
        "adrenal_gland_right": "adrenal_gland_right",
        "bladder": "bladder",
        "common_bile_duct": "common_bile_duct",
        "celic_artery": "celiac_artery",
        "colon": "colon",
        "duodenum": "duodenum",
        "femur_right": "femur_right",
        "femur_left": "femur_left",
        "gall_bladder": "gall_bladder",
        "postcava": "postcava",
        "kidney_left": "kidney_left",
        "kidney_right": "kidney_right",
        "liver": "liver",
        "prostate": "prostate",
        "superior_mesenteric_artery": "superior_mesenteric_artery",
        "intestine": "intestine",
        "spleen": "spleen",
        "stomach": "stomach",
        "veins": "veins",
    }

    try:
        temp_pdf = canvas.Canvas(temp_pdf_path, pagesize=letter)
        width, height = letter
        left_margin, top_margin = 50, 100
        line_height, section_spacing = 12, 30
        y_position = height - top_margin

        def reset_page():
            nonlocal y_position
            temp_pdf.showPage()
            y_position = height - 120
            temp_pdf.setFont("Helvetica", 10)

        def write_wrapped_text(x, y, content, bold=False, font_size=10, max_width=None):
            temp_pdf.setFont("Helvetica-Bold" if bold else "Helvetica", font_size)
            words = content.split()
            current_line = ""
            max_width = max_width or width - left_margin * 2
            for word in words:
                if temp_pdf.stringWidth(current_line + word + " ", "Helvetica", font_size) > max_width:
                    temp_pdf.drawString(x, y, current_line.strip())
                    y -= line_height
                    current_line = f"{word} "
                    if y < 50:
                        reset_page()
                        y = y_position
                else:
                    current_line += f"{word} "
            if current_line:
                temp_pdf.drawString(x, y, current_line.strip())
                y -= line_height
            return y

        def safe_extract(index, default="N/A"):
            if extracted_data is not None and index in extracted_data:
                val = extracted_data[index]
                return "N/A" if pd.isna(val) else val
            return default

        wb = load_workbook(os.path.join(Constants.PANTS_PATH, "data", "metadata.xlsx"))
        sheet = wb["PanTS_metadata"]
        age = None
        sex = "-"
        contrast = ""
        study_detail = ""
        for row in sheet.iter_rows(values_only=True):
            if row[0] == get_panTS_id(folder_name):
                age = row[5]
                sex = row[4]
                contrast = row[3]
                study_detail = row[8]
                break

        # Title
        temp_pdf.setFont("Helvetica-Bold", 26)
        title_text = "MEDICAL REPORT"
        title_width = temp_pdf.stringWidth(title_text, "Helvetica-Bold", 26)
        temp_pdf.drawString((width - title_width) / 2, height - 70, title_text)
        y_position = height - 100

        # Patient info
        temp_pdf.setFont("Helvetica-Bold", 12)
        temp_pdf.drawString(left_margin, y_position, "PATIENT INFORMATION")
        y_position -= line_height

        left_y = write_wrapped_text(left_margin, y_position, f"PANTS ID: {folder_name}")
        right_y = write_wrapped_text(width / 2, y_position, f"Sex: {sex}")
        y_position -= line_height

        write_wrapped_text(left_margin, y_position, f"Age: {age}")

        y_position = min(left_y, right_y) - section_spacing

        # Imaging detail
        temp_pdf.setFont("Helvetica-Bold", 12)
        temp_pdf.drawString(left_margin, y_position, "IMAGING DETAIL")
        y_position -= line_height

        ct_nii = nib.load(ct_path)
        spacing = ct_nii.header.get_zooms()
        shape = ct_nii.shape

        try:
            scanner_info = str(ct_nii.header['descrip'].tobytes().decode('utf-8')).strip().replace('\x00', '')
        except Exception:
            scanner_info = "N/A"

        y_position = write_wrapped_text(left_margin, y_position, f"Spacing: {spacing}")
        y_position = write_wrapped_text(left_margin, y_position, f"Shape: {shape}")
        y_position = write_wrapped_text(left_margin, y_position, f"Study type: {study_detail}")
        y_position = write_wrapped_text(left_margin, y_position, f"Contrast: {contrast}")
        y_position -= section_spacing

        # Load image data
        ct_array = ct_nii.get_fdata()
        mask_array = nib.load(mask_path).get_fdata().astype(np.uint8)
        voxel_volume = np.prod(nib.load(mask_path).header.get_zooms()) / 1000  # mm³ to cm³
        print(np.unique(mask_array))

        # AI Measurements
        temp_pdf.setFont("Helvetica-Bold", 12)
        temp_pdf.drawString(left_margin, y_position, "AI MEASUREMENTS")
        y_position -= line_height

        # Table configuration
        headers = ["Organ", "Volume (cc)", "Mean HU"]
        col_widths = [120, 100, 100]
        row_height = 20

        def draw_table_row(row_data, is_header=False):
            nonlocal y_position
            if y_position - row_height < 50:
                reset_page()
                temp_pdf.setFont("Helvetica-Bold", 12)
                temp_pdf.drawString(left_margin, y_position, "AI MEASUREMENTS (continued)")
                y_position -= line_height
                draw_table_row(headers, is_header=True)
            x = left_margin
            temp_pdf.setFont("Helvetica-Bold" if is_header else "Helvetica", 9)
            for i, cell in enumerate(row_data):
                temp_pdf.drawString(x + 2, y_position - row_height + 5, str(cell))
                temp_pdf.line(x, y_position, x, y_position - row_height)
                x += col_widths[i]
            temp_pdf.line(left_margin + sum(col_widths), y_position, left_margin + sum(col_widths), y_position - row_height)
            temp_pdf.line(left_margin, y_position, left_margin + sum(col_widths), y_position)
            y_position -= row_height
            temp_pdf.line(left_margin, y_position, left_margin + sum(col_widths), y_position)

        draw_table_row(headers, is_header=True)

        # Aggregate sub-labels (e.g. pancreas parts/lesions) into their parent organ.
        lesion_volume_dict = {}
        for organ, label_id in LABELS.items():
            if organ in NAME_TO_ORGAN and NAME_TO_ORGAN[organ] != organ:
                mask = (mask_array == label_id)
                if not np.any(mask):
                    continue
                volume = np.sum(mask) * voxel_volume
                if NAME_TO_ORGAN[organ] in lesion_volume_dict:
                    lesion_volume_dict[NAME_TO_ORGAN[organ]]["number"] += 1
                    lesion_volume_dict[NAME_TO_ORGAN[organ]]["volume"] += volume
                else:
                    lesion_volume_dict[NAME_TO_ORGAN[organ]] = {
                        "number": 1,
                        "volume": volume
                    }

        print(lesion_volume_dict)

        for organ, label_id in LABELS.items():
            if organ in NAME_TO_ORGAN and NAME_TO_ORGAN[organ] != organ:
                continue
            if label_id == 0:
                continue
            mask = (mask_array == label_id)
            if not np.any(mask):
                continue
            volume = np.sum(mask) * voxel_volume
            mean_hu = np.mean(ct_array[mask])

            row = [organ.replace('_', ' '), f"{volume:.2f}", f"{mean_hu:.1f}"]
            draw_table_row(row)

        # y_position -= section_spacing

        # === Step 2: PDAC Staging ===
        # temp_pdf.setFont("Helvetica-Bold", 12)
        # temp_pdf.drawString(left_margin, y_position, "PDAC STAGING")
        # y_position -= line_height

        # try:
        #     pdac_info = get_pdac_staging(id)
        #     print(pdac_info, id)
        #     pdac_text = pdac_info.get("staging_description", "No staging data available.")
        # except Exception:
        #     pdac_text = "Error fetching PDAC staging information."

        # y_position = write_wrapped_text(left_margin, y_position, pdac_text, bold=False, font_size=10)
        # === Step 3: Key Images ===

        # include_liver = np.count_nonzero(mask_array == LABELS["liver"]) > 0
        # include_pancreas = lesion_volume_dict.get("pancreas", {}).get("number", 0) > 0
        # include_kidney = np.count_nonzero(mask_array == LABELS["kidney_left"]) > 0 or np.count_nonzero(mask_array == LABELS["kidney_right"]) > 0
        # print(include_liver, include_pancreas, include_kidney)
        # if include_liver or include_pancreas or include_kidney:
        #     def check_and_reset_page(space_needed):
        #         nonlocal y_position
        #         if y_position - space_needed < 50:
        #             reset_page()

        #     temp_pdf.showPage()
        #     y_position = height - top_margin
        #     temp_pdf.setFont("Helvetica-Bold", 14)
        #     # temp_pdf.drawString(left_margin, y_position, "KEY IMAGES")
        #     y_position -= section_spacing

        #     organs = {
        #         "liver": include_liver,
        #         "pancreas": include_pancreas,
        #         "kidney_left": include_kidney,
        #         "kidney_right": include_kidney
        #     }
        #     download_clean_folder(ct_path.replace("/inputs/", "/outputs/").rsplit("/", 1)[0])
        #     for organ in organs:
        #         organ_data = lesion_volume_dict.get(organ)
        #         if not organ_data or organ_data.get("number", 0) == 0:
        #             continue

        #         header = f"{organ.replace('_', ' ').upper()} TUMORS"
        #         check_and_reset_page(line_height)
        #         temp_pdf.setFont("Helvetica", 12)
        #         temp_pdf.drawString(left_margin, y_position, header)
        #         y_position -= line_height
        #         print(organ, organ_data)
        #         check_and_reset_page(220)
        #         overlay_path = f"/tmp/{organ}_overlay.png"
        #         print(ct_path, mask_path)
        #         organ_mask_path = mask_path.replace('combined_labels.nii.gz', 'segmentations/' + organ + '.nii.gz')
        #         print(organ_mask_path)
        #         if create_overlay_image(ct_path, organ_mask_path, overlay_path, color="red"):
        #             try:
        #                 temp_pdf.drawImage(overlay_path, left_margin, y_position - 200, width=200, height=200)
        #             except Exception:
        #                 print(overlay_path)
        #         check_and_reset_page(220)
        #         zoom_path = f"/tmp/{organ}_zoomed.png"
        #         if zoom_into_labeled_area(ct_path, organ_mask_path, zoom_path, color="red"):
        #             temp_pdf.drawImage(zoom_path, left_margin + 250, y_position - 205, width=210, height=210)
        #         y_position -= 220

        temp_pdf.save()

        # Merge each content page onto the first page of the template
        template_reader = PdfReader(template_pdf)
        content_reader = PdfReader(temp_pdf_path)
        writer = PdfWriter()

        for page in content_reader.pages:
            template_page = template_reader.pages[0]
            merged_page = PageObject.create_blank_page(
                width=template_page.mediabox.width,
                height=template_page.mediabox.height
            )
            merged_page.merge_page(template_page)
            merged_page.merge_page(page)
            writer.add_page(merged_page)

        with open(output_pdf, "wb") as f:
            writer.write(f)

    except Exception as e:
        raise RuntimeError(f"Error generating PDF for {folder_name}: {e}")
    finally:
        if os.path.exists(temp_pdf_path):
            os.remove(temp_pdf_path)

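# Call sketch (hypothetical paths; the template PDF supplies the letterhead
# that every generated content page is merged onto):
#
#   generate_pdf_with_template(
#       output_pdf="/tmp/report.pdf",
#       folder_name="PanTS_00000001",
#       ct_path="/data/ImageTr/PanTS_00000001/ct.nii.gz",
#       mask_path="/data/LabelTr/PanTS_00000001/combined_labels.nii.gz",
#       template_pdf="/assets/template.pdf",
#       temp_pdf_path="/tmp/_content.pdf",
#       id="1",
#   )
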
# Helper function to process CT and mask
def get_most_labeled_slice(ct_path, mask_path, output_png, contrast_min=-150, contrast_max=250):
    """
    Load CT and mask, ensure RAS orientation, find the most labeled slice, and generate an overlay image.
    """

    try:
        import SimpleITK as sitk
        import matplotlib
        matplotlib.use('Agg')  # key: headless backend, never try to open a GUI

        import matplotlib.pyplot as plt

        # Load the CT scan and mask
        ct_scan = sitk.ReadImage(ct_path)
        mask = sitk.ReadImage(mask_path)

        # Reorient to RAS
        ct_scan = sitk.DICOMOrient(ct_scan, 'RAS')
        mask = sitk.DICOMOrient(mask, 'RAS')

        # Convert to numpy arrays
        ct_array = sitk.GetArrayFromImage(ct_scan)
        mask_array = sitk.GetArrayFromImage(mask)

        # Check for shape mismatches
        if ct_array.shape != mask_array.shape:
            raise ValueError(f"Shape mismatch: CT shape {ct_array.shape}, Mask shape {mask_array.shape}")

        # Find the slice with the most labels
        slice_sums = np.sum(mask_array, axis=(1, 2))
        most_labeled_slice_index = np.argmax(slice_sums)

        # Get the CT and mask slices
        ct_slice = ct_array[most_labeled_slice_index]
        mask_slice = mask_array[most_labeled_slice_index]

        # Apply mirroring
        ct_slice = np.fliplr(ct_slice)
        mask_slice = np.fliplr(mask_slice)

        # Apply contrast adjustment
        ct_slice = np.clip(ct_slice, contrast_min, contrast_max)
        ct_slice = (ct_slice - contrast_min) / (contrast_max - contrast_min) * 255
        ct_slice = ct_slice.astype(np.uint8)

        # Overlay mask contours on CT slice
        plt.figure(figsize=(6, 6))
        plt.imshow(ct_slice, cmap='gray', origin='lower')
        plt.contour(mask_slice, colors='red', linewidths=1)  # red contours for the mask
        plt.axis('off')
        plt.savefig(output_png, bbox_inches="tight", pad_inches=0)
        plt.close()
        return True
    except Exception:
        return False

def create_overlay_image(ct_path, mask_path, output_path, color="red"):
    """
    Generate an overlay image for the most labeled slice using the unified RAS orientation logic.
    """
    return get_most_labeled_slice(ct_path, mask_path, output_path)


# Helper function to zoom into the labeled area
def zoom_into_labeled_area(ct_path, mask_path, output_path, color="red"):
    """
    Create a zoomed-in view of the largest labeled area with consistent RAS orientation.
    """
    import SimpleITK as sitk
    import matplotlib.pyplot as plt
    try:
        # Load the CT scan and mask
        ct_scan = sitk.ReadImage(ct_path)
        mask = sitk.ReadImage(mask_path)

        # Reorient to RAS
        ct_scan = sitk.DICOMOrient(ct_scan, 'RAS')
        mask = sitk.DICOMOrient(mask, 'RAS')

        # Convert to numpy arrays
        ct_array = sitk.GetArrayFromImage(ct_scan)
        mask_array = sitk.GetArrayFromImage(mask)

        # Check for shape mismatches
        if ct_array.shape != mask_array.shape:
            raise ValueError(f"Shape mismatch: CT shape {ct_array.shape}, Mask shape {mask_array.shape}")

        # Find the slice with the most labels
        slice_sums = np.sum(mask_array, axis=(1, 2))
        largest_slice_idx = np.argmax(slice_sums)
        if slice_sums[largest_slice_idx] == 0:
            raise ValueError("No labeled area found in the mask.")

        # Get the mask slice and calculate the bounding box
        mask_slice = mask_array[largest_slice_idx]
        coords = np.array(np.where(mask_slice))
        min_row, max_row = np.min(coords[0]), np.max(coords[0])
        min_col, max_col = np.min(coords[1]), np.max(coords[1])
        padding = 20
        min_row = max(min_row - padding, 0)
        max_row = min(max_row + padding, mask_slice.shape[0])
        min_col = max(min_col - padding, 0)
        max_col = min(max_col + padding, mask_slice.shape[1])

        # Extract the zoomed region
        zoomed_image = ct_array[largest_slice_idx][min_row:max_row, min_col:max_col]
        zoomed_mask = mask_array[largest_slice_idx][min_row:max_row, min_col:max_col]

        # Apply mirroring
        zoomed_image = np.fliplr(zoomed_image)
        zoomed_mask = np.fliplr(zoomed_mask)

        # Apply contrast adjustment to the zoomed CT slice
        zoomed_image = np.clip(zoomed_image, -150, 250)
        zoomed_image = (zoomed_image + 150) / 400 * 255
        zoomed_image = zoomed_image.astype(np.uint8)

        # Save the zoomed-in image with overlay
        plt.figure(figsize=(6, 6))
        plt.imshow(zoomed_image, cmap="gray", origin="lower")
        plt.contour(zoomed_mask, colors=color, linewidths=1)
        plt.axis("off")
        plt.savefig(output_path, bbox_inches="tight")
        plt.close()
        return True
    except Exception:
        return False

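# Sketch (hypothetical paths): both helpers share the RAS pipeline above; one
# renders the whole most-labeled slice, the other crops to the mask's padded
# bounding box:
#
#   create_overlay_image("/tmp/ct.nii.gz", "/tmp/liver.nii.gz", "/tmp/overlay.png")
#   zoom_into_labeled_area("/tmp/ct.nii.gz", "/tmp/liver.nii.gz", "/tmp/zoom.png")
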
def get_pdac_staging(clabel_id):
    try:
        subfolder = "ImageTr" if int(clabel_id) < 9000 else "ImageTe"
        label_subfolder = "LabelTr" if int(clabel_id) < 9000 else "LabelTe"
        main_nifti_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(clabel_id)}/{Constants.MAIN_NIFTI_FILENAME}"
        combined_labels_path = f"{Constants.PANTS_PATH}/data/{label_subfolder}/{get_panTS_id(clabel_id)}/{Constants.COMBINED_LABELS_NIFTI_FILENAME}"

        nifti_processor = NiftiProcessor(main_nifti_path, combined_labels_path)
        staging_result = nifti_processor.calculate_pdac_sma_staging()

        return {"staging_description": staging_result}

    except Exception as e:
        import traceback
        traceback.print_exc()
        return {"error": f"PDAC staging failed: {str(e)}"}

import json
def download_clean_folder(root):
    """
    If the folder contains exactly the four target files, remove two of them and
    split combined_labels.nii.gz into per-organ files according to dataset.json.
    """
    target_files = {
        "combined_labels.nii.gz",
        "dataset.json",
        "plans.json",
        "predict_from_raw_data_args.json"
    }

    actual_files = set(os.listdir(root))
    if actual_files == target_files:
        # Remove plans.json and predict_from_raw_data_args.json
        for fname in ["plans.json", "predict_from_raw_data_args.json"]:
            fpath = os.path.join(root, fname)
            if os.path.exists(fpath):
                os.remove(fpath)
                print(f"🗑️ Removed during zip: {fpath}")

        # Read dataset.json
        dataset_json_path = os.path.join(root, "dataset.json")
        with open(dataset_json_path, 'r') as f:
            dataset_info = json.load(f)

        labels = dataset_info["labels"]  # mapping of label name to label ID

        # Load combined_labels.nii.gz
        combined_path = os.path.join(root, "combined_labels.nii.gz")
        combined_img = nib.load(combined_path)
        combined_data = combined_img.get_fdata()
        affine = combined_img.affine

        # Create the segmentations folder
        seg_folder = os.path.join(root, "segmentations")
        os.makedirs(seg_folder, exist_ok=True)

        # Write a separate mask file for each label
        for label_name, label_value in labels.items():
            mask = (combined_data == label_value).astype(np.uint8)
            label_img = nib.Nifti1Image(mask, affine)
            out_path = os.path.join(seg_folder, f"{label_name}.nii.gz")
            nib.save(label_img, out_path)
            print(f"✅ Saved: {out_path}")
        os.remove(dataset_json_path)
    else:
        print("ℹ️ Folder content does not match the expected file set. Skipping cleanup and split.")

async def store_files(combined_labels_id):
    subfolder = "LabelTr" if int(combined_labels_id) < 9000 else "LabelTe"
    image_subfolder = "ImageTr" if int(combined_labels_id) < 9000 else "ImageTe"

    def download(url, path):
        os.makedirs(os.path.dirname(path), exist_ok=True)
        headers = {"User-Agent": "Mozilla/5.0"}
        res = requests.get(url, stream=True, headers=headers, allow_redirects=True)
        if res.status_code == 200:
            with open(path, "wb") as f:
                for chunk in res.iter_content(1024):
                    f.write(chunk)
            print(f"Saved: {path}")
        else:
            print(f"Failed: {url} ({res.status_code})")

    # main CT
    image_url = f"https://huggingface.co/datasets/BodyMaps/iPanTSMini/resolve/main/image_only/{get_panTS_id(combined_labels_id)}/ct.nii.gz"
    image_path = f"{Constants.PANTS_PATH}/data/{image_subfolder}/{get_panTS_id(combined_labels_id)}/ct.nii.gz"
    download(image_url, image_path)

    # labels
    for label in list(Constants.PREDEFINED_LABELS.values()):
        mask_url = f"https://huggingface.co/datasets/BodyMaps/iPanTSMini/resolve/main/mask_only/{get_panTS_id(combined_labels_id)}/segmentations/{label}.nii.gz"
        mask_path = f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(combined_labels_id)}/segmentations/{label}.nii.gz"
        download(mask_url, mask_path)

META_FILE = f"{Constants.PANTS_PATH}/data/metadata.xlsx"
# ---------------------------
# Helpers
# ---------------------------
def _arg(name: str, default=None):
    return request.args.get(name, default)

def to_int(x) -> Optional[int]:
    try:
        return int(x)
    except Exception:
        return None

def _to_float(x) -> Optional[float]:
    try:
        return float(x)
    except Exception:
        return None

def _to01_query(x) -> Optional[int]:
    if x is None: return None
    s = str(x).strip().lower()
    if s in ("1", "true", "yes", "y"): return 1
    if s in ("0", "false", "no", "n"): return 0
    return None

def _collect_list_params(names: List[str]) -> List[str]:
    out: List[str] = []
    for n in names:
        if n in request.args:
            out += request.args.getlist(n)
    tmp: List[str] = []
    for s in out:
        if "," in s:
            tmp += [t.strip() for t in s.split(",") if t.strip()]
        else:
            tmp.append(s.strip())
    return [t for t in tmp if t]

def _nan2none(v):
    try:
        if v is None: return None
        if pd.isna(v): return None
    except Exception:
        pass
    return v

def clean_json_list(items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    def _clean(v):
        if isinstance(v, (np.integer,)): return int(v)
        if isinstance(v, (np.floating,)): return float(v)
        if isinstance(v, (np.bool_,)): return bool(v)
        return v
    return [{k: _clean(v) for k, v in d.items()} for d in items]

def _canon_letters_digits(s: str) -> str:
    # Turn "LightSpeed16" into "LightSpeed 16"
    s2 = re.sub(r"([A-Za-z])(\d)", r"\1 \2", s)
    s2 = re.sub(r"(\d)([A-Za-z])", r"\1 \2", s2)
    return re.sub(r"\s+", " ", s2).strip()

def canon_model(s: str) -> str:
    if not s: return ""
    base = str(s).strip()
    # Normalize whitespace/underscores/case
    low = re.sub(r"[_\-]+", " ", base).strip().lower()
    low = _canon_letters_digits(low)
    # Apply the alias table
    if low in Constants.MODEL_ALIASES:
        return Constants.MODEL_ALIASES[low]
    # Not in the alias table: fall back to the safe "letters/digits separated" form
    spaced = _canon_letters_digits(base)
    # Fixed capitalization for common vendor prefixes
    spaced = re.sub(r"(?i)^somatom", "SOMATOM", spaced)
    spaced = re.sub(r"(?i)^iqon", "IQon", spaced)
    return spaced

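# Sketch of the normalization chain:
#
#   canon_model("lightspeed16")    # -> "LightSpeed 16"  (alias-table hit)
#   canon_model("Revolution256")   # -> "Revolution 256" (letters/digits fallback)
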
# ---------------------------
# Load & normalize
# ---------------------------
def _norm_cols(df_raw: pd.DataFrame) -> pd.DataFrame:
    """Normalize columns and derive the fields needed for search and sorting."""
    df = df_raw.copy()

    # ---- Case ID ----
    case_cols = ["PanTS ID", "PanTS_ID", "case_id", "id", "case", "CaseID"]
    def _first_nonempty(row, cols):
        for c in cols:
            if c in row.index and pd.notna(row[c]) and str(row[c]).strip():
                return str(row[c]).strip(), c
        return "", None

    cases, mapping = [], []
    for _, r in df.iterrows():
        s, c = _first_nonempty(r, case_cols)
        cases.append(s); mapping.append({"case": c} if c else {})
    df["__case_str"] = cases
    df["_orig_cols"] = mapping

    # ---- Tumor -> __tumor01 ----
    def _canon(s: str) -> str: return re.sub(r"[^a-z]+", "", str(s).lower())
    tumor_names = [c for c in df.columns if "tumor" in _canon(c)] or []
    tcol = tumor_names[0] if tumor_names else None

    def _to01_v(v):
        if pd.isna(v): return np.nan
        s = str(v).strip().lower()
        if s in ("1", "yes", "y", "true", "t"): return 1
        if s in ("0", "no", "n", "false", "f"): return 0
        try:
            iv = int(float(s))
            return 1 if iv == 1 else (0 if iv == 0 else np.nan)
        except Exception:
            return np.nan

    df["__tumor01"] = (df[tcol].map(_to01_v) if tcol else pd.Series([np.nan] * len(df), index=df.index))
    if tcol:
        df["_orig_cols"] = [{**(df["_orig_cols"].iat[i] or {}), "tumor": tcol} for i in range(len(df))]

    # ---- Sex -> __sex ----
    df["__sex"] = df.get("sex", pd.Series([""] * len(df))).astype(str).str.strip().str.upper()
    df["__sex"] = df["__sex"].where(df["__sex"].isin(["F", "M"]), "")

    # ---- Generic column finder ----
    def _find_col(prefer, keyword_sets=None):
        for c in prefer:
            if c in df.columns: return c
        if keyword_sets:
            canon_map = {c: re.sub(r"[^a-z0-9]+", "", str(c).lower()) for c in df.columns}
            for c, cs in canon_map.items():
                for ks in keyword_sets:
                    if all(k in cs for k in ks): return c
        return None

    # ---- CT phase -> __ct / __ct_lc ----
    ct_col = _find_col(
        prefer=["ct phase", "CT phase", "ct_phase", "CT_phase", "ct"],
        keyword_sets=[["ct", "phase"], ["phase"]],
    )
    if ct_col:
        df["__ct"] = df[ct_col].astype(str).str.strip()
        df["__ct_lc"] = df["__ct"].str.lower()
        df["_orig_cols"] = [{**(df["_orig_cols"].iat[i] or {}), "ct_phase": ct_col} for i in range(len(df))]
    else:
        df["__ct"], df["__ct_lc"] = "", ""

    # ---- Manufacturer -> __mfr / __mfr_lc ----
    mfr_col = _find_col(
        prefer=["manufacturer", "Manufacturer", "mfr", "MFR", "vendor", "Vendor", "manufacturer name", "Manufacturer Name"],
        keyword_sets=[["manufactur"], ["vendor"], ["brand"], ["maker"]],
    )
    if mfr_col:
        df["__mfr"] = df[mfr_col].astype(str).str.strip()
        df["__mfr_lc"] = df["__mfr"].str.lower()
        df["_orig_cols"] = [{**(df["_orig_cols"].iat[i] or {}), "manufacturer": mfr_col} for i in range(len(df))]
    else:
        df["__mfr"], df["__mfr_lc"] = "", ""

    # ---- Manufacturer model -> model / __model_lc ----
    model_col = _find_col(
        prefer=["manufacturer model", "Manufacturer model", "model", "Model"],
        keyword_sets=[["model"]],
    )
    if model_col:
        # Keep the raw string for traceability
        df["model_raw"] = df[model_col].astype(str).str.strip()
        # Canonicalize to a standard model name (case, whitespace, digits glued to letters, etc.)
        df["model"] = df["model_raw"].map(canon_model)
        df["__model_lc"] = df["model"].str.lower()
        df["_orig_cols"] = [
            {**(df["_orig_cols"].iat[i] or {}), "model": model_col}
            for i in range(len(df))
        ]
    else:
        # Ensure the columns exist so the frontend can always read them
        df["model_raw"] = ""
        df["model"] = ""
        df["__model_lc"] = ""

    # ---- Year -> __year_int ----
    year_col = _find_col(prefer=["study year", "Study year", "study_year", "year", "Year"],
                         keyword_sets=[["year"]])
    df["__year_int"] = (
        pd.to_numeric(df[year_col], errors="coerce")
        if year_col else pd.Series([np.nan] * len(df), index=df.index)
    )
    if year_col:
        df["_orig_cols"] = [
            {**(df["_orig_cols"].iat[i] or {}), "year": year_col}
            for i in range(len(df))
        ]

    # ---- Age -> __age ----
    age_col = _find_col(prefer=["age", "Age"], keyword_sets=[["age"]])
    df["__age"] = (
        pd.to_numeric(df[age_col], errors="coerce")
        if age_col else pd.Series([np.nan] * len(df), index=df.index)
    )
    if age_col:
        df["_orig_cols"] = [
            {**(df["_orig_cols"].iat[i] or {}), "age": age_col}
            for i in range(len(df))
        ]

    # ---- Study type -> study_type / __st_lc ----
    st_col = _find_col(
        prefer=["study type", "Study type", "study_type", "Study_type"],
        keyword_sets=[["study", "type"]],
    )
    if st_col:
        df["study_type"] = df[st_col].astype(str)
        df["__st_lc"] = df["study_type"].astype(str).str.strip().str.lower()
        df["_orig_cols"] = [
            {**(df["_orig_cols"].iat[i] or {}), "study_type": st_col}
            for i in range(len(df))
        ]
    else:
        df["study_type"] = ""
        df["__st_lc"] = ""

    # ---- Site nationality -> site_nationality / __sn_lc ----
    sn_col = _find_col(
        prefer=[
            "site nationality", "Site nationality", "site_nationality", "Site_nationality",
            "nationality", "Nationality", "site country", "Site country", "country", "Country"
        ],
        keyword_sets=[["site", "national"], ["nationality"], ["site", "country"], ["country"]],
    )
    if sn_col:
        df["site_nationality"] = df[sn_col].astype(str)
        df["__sn_lc"] = df["site_nationality"].astype(str).str.strip().str.lower()
        df["_orig_cols"] = [
            {**(df["_orig_cols"].iat[i] or {}), "site_nationality": sn_col}
            for i in range(len(df))
        ]
    else:
        df["site_nationality"] = ""
        df["__sn_lc"] = ""

    return df


def _safe_float(x) -> Optional[float]:
    try:
        if x is None: return None
        if isinstance(x, float) and np.isnan(x): return None
        if isinstance(x, str):
            s = x.strip().replace(",", " ")
            if not s: return None
            return float(s)
        return float(x)
    except Exception:
        return None

def _take_first_str(row, cols: List[str]) -> str:
    for c in cols:
        if c in row and pd.notna(row[c]) and str(row[c]).strip():
            return str(row[c]).strip()
    return ""

def _case_key(row) -> int:
    s = _take_first_str(row, ["PanTS ID", "PanTS_ID", "case_id", "id", "__case_str"])
    if not s: return 0
    m = re.search(r"(\d+)", str(s))
    return int(m.group(1)) if m else 0

def _parse_3tuple_from_row(row, name_candidates: List[str]) -> List[Optional[float]]:
    # Three separate columns
    for base in name_candidates:
        cx, cy, cz = f"{base}_x", f"{base}_y", f"{base}_z"
        if cx in row and cy in row and cz in row:
            xs = [_safe_float(row[c]) for c in (cx, cy, cz)]
            if all(v is not None for v in xs):
                return xs
    # Single string column
    seps = [",", "x", " ", "×", "X", ";", "|"]
    str_cols = []
    for base in name_candidates:
        str_cols += [base, f"{base}_str", base.replace(" ", "_")]
    for c in str_cols:
        if c in row and pd.notna(row[c]):
            s = str(row[c]).strip()
            if not s: continue
            s2 = re.sub(r"[\[\]\(\)\{\}]", " ", s)
            for sep in seps:
                s2 = s2.replace(sep, " ")
            parts = [p for p in s2.split() if p]
            vals = [_safe_float(p) for p in parts[:3]]
            if len(vals) == 3 and all(v is not None for v in vals):
                return vals
    return [None, None, None]

def _spacing_sum(row) -> Optional[float]:
    vals = _parse_3tuple_from_row(row, ["spacing", "voxel_spacing", "voxel_size", "pixel_spacing"])
    if any(v is None for v in vals): return None
    return float(vals[0] + vals[1] + vals[2])

def _shape_sum(row) -> Optional[float]:
    vals = _parse_3tuple_from_row(row, ["shape", "dim", "size", "image_shape", "resolution"])
    if any(v is None for v in vals): return None
    return float(vals[0] + vals[1] + vals[2])

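# Sketch (hypothetical row): the same triple is recovered from either the
# three-column or the single-string spelling:
#
#   row = pd.Series({"spacing": "0.8x0.8x3.0"})
#   _parse_3tuple_from_row(row, ["spacing"])   # -> [0.8, 0.8, 3.0]
#   _spacing_sum(row)                          # -> ~4.6
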
def ensure_sort_cols(df: pd.DataFrame) -> pd.DataFrame:
    if "__case_sortkey" not in df.columns:
        df["__case_sortkey"] = df.apply(_case_key, axis=1)
    if "__spacing_sum" not in df.columns:
        df["__spacing_sum"] = df.apply(_spacing_sum, axis=1)
    if "__shape_sum" not in df.columns:
        df["__shape_sum"] = df.apply(_shape_sum, axis=1)

    # Completeness flag: used by Browse and by the "top" sort
    need_cols = ["__spacing_sum", "__shape_sum", "__sex", "__age"]
    complete = pd.Series(True, index=df.index)
    for c in need_cols:
        if c not in df.columns:
            complete &= False
        elif c == "__sex":
            complete &= (df[c].astype(str).str.strip() != "")
        else:
            complete &= df[c].notna()
    df["__complete"] = complete
    return df

# Load metadata
if not os.path.exists(META_FILE):
    raise FileNotFoundError(f"metadata not found: {META_FILE}")
DF_RAW = pd.read_excel(META_FILE)
DF = _norm_cols(DF_RAW)

def apply_filters(base: pd.DataFrame, exclude: Optional[Set[str]] = None) -> pd.DataFrame:
    exclude = exclude or set()
    df = base

    # --- Case ID / keyword (exact match) ---
    q = (_arg("q") or _arg("caseid") or "").strip()
    if q and "caseid" not in exclude and "__case_str" in df.columns:
        s = df["__case_str"].astype(str)
        if q.isdigit():
            # Extract every numeric token per row and compare numerically,
            # so 77 does not match 177/077 (leading zeros are ignored).
            qq = int(q)
            nums = s.str.findall(r"\d+")
            mask_num = nums.apply(lambda xs: any(int(x) == qq for x in xs))
            # Fallback: also allow "Case 77" (optional, can be removed)
            patt = rf"(?i)\b(?:case\s*)?{re.escape(q)}\b"
            mask_regex = s.str.contains(patt, na=False, regex=True)
            df = df[mask_num | mask_regex]
        else:
            # Plain text search (case-insensitive; escape the query so it is
            # not treated as a regex)
            df = df[s.str.contains(re.escape(q), na=False, case=False, regex=False)]

    # --- Tumor ---
    tv = _to01_query(_arg("tumor"))
    tnull = _to01_query(_arg("tumor_is_null"))
    if (_arg("tumor", "").strip().lower() == "unknown"):
        tnull, tv = 1, None
    if "__tumor01" in df.columns and "tumor" not in exclude:
        if tnull in (0, 1) and "tumor_is_null" not in exclude:
            df = df[df["__tumor01"].isna()] if tnull == 1 else df[df["__tumor01"].notna()]
        elif tv in (0, 1):
            df = df[df["__tumor01"] == tv]

    # --- Sex (multi-select + Unknown) ---
    sv_list = _collect_list_params(["sex", "sex[]"])
    snull = _to01_query(_arg("sex_is_null"))
    if not sv_list:
        sv = (_arg("sex", "") or "").strip().upper()
        if sv:
            sv_list = [sv]
    sv_norm = []
    for s_ in sv_list:
        s2 = (s_ or "").strip().upper()
        if s2 in ("M", "F"):
            sv_norm.append(s2)
        elif s2 in ("U", "UNKNOWN"):
            sv_norm.append("UNKNOWN")
    if "__sex" in df.columns and "sex" not in exclude and (sv_norm or snull in (0, 1)):
        ser = df["__sex"].fillna("").str.strip().str.upper()
        take = pd.Series(False, index=df.index)
        vals = [s for s in sv_norm if s in ("F", "M")]
        if vals:
            take |= ser.isin(vals)
        if ("UNKNOWN" in sv_norm) or (snull == 1):
            take |= (ser == "")
        df = df[take]

    # --- Age: supports age_bin[] (incl. 90+ / UNKNOWN), otherwise falls back to age_from/age_to ---
    bins = _collect_list_params(["age_bin", "age_bin[]"])
    age_null = _to01_query(_arg("age_is_null"))
    if "__age" in df.columns and bins:
        age_series = pd.to_numeric(df["__age"], errors="coerce")
        mask = pd.Series(False, index=df.index)
        for b in bins:
            s = (b or "").strip()
            m_plus = re.match(r"^\s*(\d+)\s*\+\s*$", s)
            if m_plus:
                lo = int(m_plus.group(1))
                mask |= (age_series >= lo)
                continue
            m_rng = re.match(r"^\s*(\d+)\s*[-–—]\s*(\d+)\s*$", s)
            if m_rng:
                lo, hi = int(m_rng.group(1)), int(m_rng.group(2))
                mask |= age_series.between(lo, hi, inclusive="both")
        if (age_null == 1) or any((t or "").strip().upper() == "UNKNOWN" for t in bins):
            mask |= age_series.isna() | (df["__age"].astype(str).str.strip().str.upper() == "UNKNOWN")
        df = df[mask]
    elif "__age" in df.columns:
        af = _to_float(_arg("age_from")); at = _to_float(_arg("age_to"))
        age_series = pd.to_numeric(df["__age"], errors="coerce")
        if "age_from" not in exclude and af is not None:
            df = df[age_series >= af]
        if "age_to" not in exclude and at is not None:
            df = df[age_series <= at]

    # --- CT phase ---
    ct = (_arg("ct_phase", "") or "").strip().lower()
    ct_list = _collect_list_params(["ct_phase", "ct_phase[]"])
    if ct == "unknown" or any((s or "").lower() == "unknown" for s in ct_list):
        if "__ct" in df.columns:
            s_ct = df["__ct"].astype(str).str.strip().str.lower()
            tokens_null_ct = {'', 'unknown', 'nan', 'n/a', 'na', 'none', '(blank)', '(null)'}
            df = df[df["__ct"].isna() | s_ct.isin(tokens_null_ct)]
    elif (ct or ct_list) and "__ct_lc" in df.columns:
        parts = []
        if ct:
            parts += [p.strip() for p in re.split(r"[;,/]+", ct) if p.strip()]
        parts += [p.strip().lower() for p in ct_list if p.strip()]
        patt = "|".join(re.escape(p) for p in parts)
        df = df[df["__ct_lc"].str.contains(patt, na=False)]

    # --- Manufacturer ---
    m_list = _collect_list_params(["manufacturer", "manufacturer[]", "mfr"])
    m_raw = (_arg("manufacturer", "") or "").strip()
    if m_raw and not m_list:
        m_list = [p.strip() for p in m_raw.split(",") if p.strip()]
    if m_list and "__mfr_lc" in df.columns:
        m_lc = [s.lower() for s in m_list]
        df = df[df["__mfr_lc"].isin(m_lc)]

    # --- Model (canonical; optionally fuzzy) ---
    model_list = _collect_list_params(["model", "model[]", "manufacturer_model"])
    model_raw = (_arg("model", "") or "").strip()
    if model_raw and not model_list:
        model_list = [p.strip() for p in re.split(r"[;,/|]+", model_raw) if p.strip()]
    if model_list and "__model_lc" in df.columns and "model" not in exclude:
        wants = [canon_model(p).lower() for p in model_list if p]
        wants = [w for w in wants if w]
        fuzzy = str(_arg("model_fuzzy", "0")).lower() in ("1", "true", "yes")
        if fuzzy:
            patt = "|".join(re.escape(w) for w in wants)
            df = df[df["__model_lc"].str.contains(patt, na=False)]
        else:
            df = df[df["__model_lc"].isin(set(wants))]

    # --- Study type ---
    st_list = _collect_list_params(["study_type", "study_type[]"])
    st_raw = (_arg("study_type", "") or "").strip()
    if st_raw and not st_list:
        st_list = [p.strip() for p in re.split(r"[;,/|]+", st_raw) if p.strip()]
    if st_list and "__st_lc" in df.columns and "study_type" not in exclude:
        parts = [p.lower() for p in st_list]
        patt = "|".join(re.escape(p) for p in parts)
        df = df[df["__st_lc"].str.contains(patt, na=False)]

    # --- Site nationality ---
    nat_list = _collect_list_params(["site_nat", "site_nat[]", "site_nationality", "site_nationality[]"])
    nat_raw = (_arg("site_nationality", "") or _arg("site_nat", "") or "").strip()
    if nat_raw and not nat_list:
        nat_list = [p.strip() for p in re.split(r"[;,/|]+", nat_raw) if p.strip()]
    if nat_list and "__sn_lc" in df.columns and "site_nationality" not in exclude:
        parts = [p.lower() for p in nat_list]
        patt = "|".join(re.escape(p) for p in parts)
        df = df[df["__sn_lc"].str.contains(patt, na=False)]

    # --- Year ---
    # Supports year / year[] (multi-select exact), year_from / year_to (range),
    # and year_is_null (Unknown).
    if "year" not in exclude:
        _year_cols_pref = ["__year_int", "study_year", "Study year", "study year", "Year", "year"]
        _found_cols = [c for c in _year_cols_pref if c in df.columns]
        if _found_cols:
            yser = pd.to_numeric(df[_found_cols[0]], errors="coerce")

            # 1) Multi-select years
            year_list = _collect_list_params(["year", "year[]"])
            year_raw = (_arg("year", "") or "").strip()
            if year_raw and not year_list:
                year_list = [p.strip() for p in re.split(r"[;,/|]+", year_raw) if p.strip()]

            # 2) Range
            y_from = to_int(_arg("year_from"))
            y_to = to_int(_arg("year_to"))

            # 3) Unknown / Null
            y_is_null = _to01_query(_arg("year_is_null"))
            _unk_tokens = {"unknown", "nan", "none", "n/a", "na", "(blank)", "(null)"}
            wants_unknown = (y_is_null == 1) or any(
                (s or "").strip().lower() in _unk_tokens for s in year_list
            )

            mask = pd.Series(True, index=df.index)

            # Exact multi-select years
            exact_years = []
            for s in year_list:
                try:
                    exact_years.append(int(s))
                except Exception:
                    pass
            if exact_years:
                mask &= yser.isin(set(exact_years))

            # Range conditions
            if y_from is not None:
                mask &= (yser >= y_from)
            if y_to is not None:
                mask &= (yser <= y_to)

            # Merge Unknown rows in
            if wants_unknown:
                mask = mask | yser.isna()

            df = df[mask]

    return df

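# Query sketch (hypothetical request, evaluated inside a Flask request
# context): each recognized parameter narrows DF further, e.g.
#
#   GET ...?q=77&sex=F&age_bin=60-69&year_from=2010&model=lightspeed16
#
#   filtered = apply_filters(DF)
#   # -> exact case 77, sex F, age 60-69, year >= 2010,
#   #    model canonicalized to "LightSpeed 16"
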
def row_to_item(row: pd.Series) -> Dict[str, Any]:
    cols = row.get("_orig_cols")
    cols = cols if isinstance(cols, dict) else {}

    def pick(k, fallback=None):
        col = cols.get(k)
        if col and col in row.index:
            return row[col]
        return fallback

    return {
        "PanTS ID": _nan2none(pick("case") or row.get("__case_str")),
        "case_id": _nan2none(pick("case") or row.get("__case_str")),
        "tumor": (int(row.get("__tumor01")) if pd.notna(row.get("__tumor01")) else None),
        "sex": _nan2none(row.get("__sex")),
        "age": _nan2none(row.get("__age")),
        "ct phase": _nan2none(pick("ct_phase") or row.get("__ct")),
        "manufacturer": _nan2none(pick("manufacturer") or row.get("__mfr")),
        "manufacturer model": _nan2none(pick("model") or row.get("model")),
        "study year": _nan2none(row.get("__year_int")),
        "study type": _nan2none(pick("study_type") or row.get("study_type")),
        "site nationality": _nan2none(pick("site_nationality") or row.get("site_nationality")),
        # Sorting helper outputs
        "spacing_sum": _nan2none(row.get("__spacing_sum")),
        "shape_sum": _nan2none(row.get("__shape_sum")),
        "complete": bool(row.get("__complete")) if "__complete" in row else None,
    }
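# Serialization sketch: filtered rows become JSON-safe API items, e.g.
#
#   items = [row_to_item(r) for _, r in apply_filters(DF).iterrows()]
#   payload = clean_json_list(items)   # numpy scalars -> plain Python types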
app.py
ADDED
@@ -0,0 +1,73 @@
import sys
import os
import logging
from werkzeug.serving import run_simple
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from dotenv import load_dotenv

load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), ".env"))
#print("DEBUG_ENV_LOADED:", os.environ.get("SESSIONS_DIR_PATH"))

from flask import Flask
from flask_cors import CORS
from constants import Constants
#print("DEBUG_CONSTANT:", Constants.SESSIONS_DIR_NAME)

from api.api_blueprint import api_blueprint
from models.base import db
from models.combined_labels import CombinedLabels

def create_session_dir():
    if not os.path.isdir(Constants.SESSIONS_DIR_NAME):
        os.mkdir(Constants.SESSIONS_DIR_NAME)

def create_app():
    create_session_dir()
    app = Flask(__name__)
    app.register_blueprint(api_blueprint, url_prefix=f'{Constants.BASE_PATH}/api')

    class FilterProgressRequests(logging.Filter):
        def filter(self, record):
            return "/api/progress/" not in record.getMessage()

    logging.getLogger('werkzeug').addFilter(FilterProgressRequests())

    CORS(app)

    return app


app = create_app()

# ✅ SharedArrayBuffer compatibility
@app.after_request
def add_security_headers(response):
    response.headers["Cross-Origin-Opener-Policy"] = "same-origin"
    response.headers["Cross-Origin-Embedder-Policy"] = "require-corp"
    return response

def find_watch_files():
    watch_dirs = ['api', 'models', 'services']
    base_path = os.path.dirname(__file__)
    all_files = []
    for d in watch_dirs:
        dir_path = os.path.join(base_path, d)
        for root, _, files in os.walk(dir_path):
            for f in files:
                if f.endswith('.py'):
                    all_files.append(os.path.join(root, f))
    return all_files

if __name__ == "__main__":
    use_ssl = os.environ.get("USE_SSL", "false").lower() == "true"
    ssl_context = ("../certs/localhost-cert.pem", "../certs/localhost-key.pem") if use_ssl else None
    run_simple(
        hostname="0.0.0.0",
        port=5001,
        application=app,
        use_debugger=True,
        use_reloader=True,
        extra_files=find_watch_files(),
        ssl_context=ssl_context
    )
constants.py
ADDED
@@ -0,0 +1,126 @@
import os
from dotenv import load_dotenv
import numpy as np
from datetime import datetime

load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), ".env"))


class Constants:
    # app variables
    SESSIONS_DIR_NAME = os.environ.get('SESSIONS_DIR_PATH', 'sessions')
    DB_USER = os.environ.get('DB_USER')
    DB_PASS = os.environ.get('DB_PASS')
    DB_HOST = os.environ.get('DB_HOST')
    DB_NAME = os.environ.get('DB_NAME')

    SCHEDULED_CHECK_INTERVAL = 5  # minutes

    # api_blueprint variables
    BASE_PATH = os.environ.get('BASE_PATH', '/')
    PANTS_PATH = os.environ.get('PANTS_PATH')
    MAIN_NIFTI_FORM_NAME = 'MAIN_NIFTI'
    MAIN_NPZ_FILENAME = 'ct.npz'
    MAIN_NIFTI_FILENAME = 'ct.nii.gz'
    COMBINED_LABELS_FILENAME = 'combined_labels.npz'
    COMBINED_LABELS_NIFTI_FILENAME = 'combined_labels.nii.gz'
    ORGAN_INTENSITIES_FILENAME = 'organ_intensities.json'
    SESSION_TIMEDELTA = 3  # in days

    # NiftiProcessor variables
    EROSION_PIXELS = 2
    CUBE_LEN = (2 * EROSION_PIXELS) + 1
    STRUCTURING_ELEMENT = np.ones([CUBE_LEN, CUBE_LEN, CUBE_LEN], dtype=bool)

    DECIMAL_PRECISION_VOLUME = 2
    DECIMAL_PRECISION_HU = 1
    VOXEL_THRESHOLD = 100

    PREDEFINED_LABELS = {
        0: "adrenal_gland_left",
        1: "adrenal_gland_right",
        2: "aorta",
        3: "bladder",
        4: "celiac_artery",
        5: "colon",
        6: "common_bile_duct",
        7: "duodenum",
        8: "femur_left",
        9: "femur_right",
        10: "gall_bladder",
        11: "kidney_left",
        12: "kidney_right",
        13: "liver",
        14: "lung_left",
        15: "lung_right",
        16: "pancreas_body",
        17: "pancreas_head",
        18: "pancreas_tail",
        19: "pancreas",
        20: "pancreatic_duct",
        21: "pancreatic_lesion",
        22: "postcava",
        23: "prostate",
        24: "spleen",
        25: "stomach",
        26: "superior_mesenteric_artery",
        27: "veins"
    }

    MODEL_ALIASES = {
        # GE
        "lightspeed 16": "LightSpeed 16",
        "lightspeed16": "LightSpeed 16",
        "lightspeed vct": "LightSpeed VCT",
        "lightspeed qx/i": "LightSpeed QX/i",
        "lightspeed pro 16": "LightSpeed Pro 16",
        "lightspeed pro 32": "LightSpeed Pro 32",
        "lightspeed plus": "LightSpeed Plus",
        "lightspeed ultra": "LightSpeed Ultra",
        # Siemens
        "somatom definition as+": "SOMATOM Definition AS+",
        "somatom definition as": "SOMATOM Definition AS",
        "somatom definition flash": "SOMATOM Definition Flash",
        "somatom definition edge": "SOMATOM Definition Edge",
        "somatom force": "SOMATOM Force",
        "somatom go.top": "SOMATOM Go.Top",
        "somatom plus 4": "SOMATOM PLUS 4",
        "somatom scope": "SOMATOM Scope",
        "somatom definition": "SOMATOM Definition",
        "sensation 4": "Sensation 4",
        "sensation 10": "Sensation 10",
        "sensation 16": "Sensation 16",
        "sensation 40": "Sensation 40",
        "sensation 64": "Sensation 64",
        "sensation cardiac 64": "Sensation Cardiac 64",
        "sensation open": "Sensation Open",
        "emotion 16": "Emotion 16",
        "emotion 6 (2007)": "Emotion 6 (2007)",
        "perspective": "Perspective",
        # Philips
        "brilliance 10": "Brilliance 10",
        "brilliance 16": "Brilliance 16",
        "brilliance 16p": "Brilliance 16P",
        "brilliance 40": "Brilliance 40",
        "brilliance 64": "Brilliance 64",
        "ingenuity core 128": "Ingenuity Core 128",
        "iqon - spectral ct": "IQon - Spectral CT",
        "philips ct aura": "Philips CT Aura",
        "precedence 16p": "Precedence 16P",
        # Canon / Toshiba
        "aquilion one": "Aquilion ONE",
        "aquilion": "Aquilion",
        # GE (other)
        "optima ct540": "Optima CT540",
        "optima ct660": "Optima CT660",
        "optima ct520 series": "Optima CT520 Series",
        "revolution ct": "Revolution CT",
        "revolution evo": "Revolution EVO",
        "discovery st": "Discovery ST",
        "discovery ste": "Discovery STE",
        "discovery mi": "Discovery MI",
        "hispeed ct/i": "HiSpeed CT/i",
        # PET/CT
        "biograph128": "Biograph128",
        "biograph 128": "Biograph128",
    }
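The erosion constants above drive the HU sampling done later in the services. A minimal sketch (not part of the upload) of how EROSION_PIXELS and STRUCTURING_ELEMENT interact: eroding a binary mask with the 5x5x5 cube peels roughly EROSION_PIXELS voxels off every surface before HU values are sampled.

import numpy as np
import scipy.ndimage as ndimage

EROSION_PIXELS = 2
CUBE_LEN = (2 * EROSION_PIXELS) + 1
STRUCTURING_ELEMENT = np.ones([CUBE_LEN, CUBE_LEN, CUBE_LEN], dtype=bool)

mask = np.zeros((20, 20, 20), dtype=bool)
mask[5:15, 5:15, 5:15] = True  # a 10x10x10 cube "organ"
eroded = ndimage.binary_erosion(mask, structure=STRUCTURING_ELEMENT)
print(mask.sum(), eroded.sum())  # 1000 -> 216: only the 6x6x6 core survives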
handle.py
ADDED
@@ -0,0 +1,124 @@
import nibabel as nib
import numpy as np
from constants import Constants
from utils import removeFileExt
import scipy.ndimage as ndimage

import os


# def combine_labels(files, session_key):
#     filenames = list(files.keys())
#     filenames.remove('MAIN_NIFTI')
#     base = os.path.join('sessions', session_key)  # base dir path
#     os.makedirs(os.path.join(base, 'segmentations'))
#     main_nifti = files['MAIN_NIFTI']
#     main_nifti.save(os.path.join(base, main_nifti_filename))
#     print(filenames)
#     aorta = files['aorta.nii.gz']
#     nib.Nifti1Image.from_bytes(aorta.read())
#     return

def voxelThreshold(slice):
    # A boundary slice counts as "empty enough" when it carries fewer than
    # VOXEL_THRESHOLD foreground voxels.
    num_voxels = len(slice[slice > 0])
    return num_voxels < Constants.VOXEL_THRESHOLD

def getCalcVolumeState(img_data, organ):
    # NOTE: organ_ids_volumeNA is referenced here but is not defined in the
    # constants.py shown above; it is expected to list blood-vessel labels
    # whose volume is reported as N/A.
    if organ in Constants.organ_ids_volumeNA:
        return "NA"  # blood vessel, volume is NA
    slices = [
        img_data[:, :, 0],
        img_data[:, :, -1],
        img_data[0, :, :],
        img_data[-1, :, :],
        img_data[:, 0, :],
        img_data[:, -1, :],
    ]
    for slice in slices:
        if voxelThreshold(slice) is False:
            return "incomplete"
    return "complete"


def processMasks(sessionKey):
    data = {"data": []}
    ct = nib.load(os.path.join('sessions', sessionKey, Constants.MAIN_NIFTI_FILENAME)).get_fdata()
    organ_ids = os.listdir(os.path.join('sessions', sessionKey, 'segmentations'))
    print(organ_ids)
    for i in range(len(organ_ids)):
        organ_data = {}
        organ_data['id'] = removeFileExt(organ_ids[i])
        img = nib.load(os.path.join('sessions', sessionKey, 'segmentations', organ_ids[i]))
        img_data = img.get_fdata()
        state = getCalcVolumeState(img_data, organ_ids[i])
        if state == "complete":
            volume_cm = round(float(nib.imagestats.mask_volume(img) / 1000), Constants.DECIMAL_PRECISION_VOLUME)
            organ_data['volume_cm'] = volume_cm
        elif state == "incomplete":
            organ_data['volume_cm'] = "Incomplete organ"
        elif state == "NA":
            organ_data['volume_cm'] = "N/A"

        erosion_data = ndimage.binary_erosion(img_data, structure=Constants.STRUCTURING_ELEMENT)
        hu_values = ct[erosion_data > 0]
        if len(hu_values) == 0:
            organ_data['mean_hu'] = 'N/A'
        else:
            mean_hu = round(float(np.mean(hu_values)), Constants.DECIMAL_PRECISION_HU)
            organ_data['mean_hu'] = mean_hu

        data['data'].append(organ_data)

    return data


# def test():
#     for organ in organ_ids:
#         mask = nib.load(f'dev/segmentations/{organ}.nii.gz').get_fdata()
#         for i in range(mask.shape[2]):
#             slice = mask[:, :, i]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break
#         for i in range(mask.shape[2]-1, -1, -1):
#             slice = mask[:, :, i]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break
#         for i in range(mask.shape[0]):
#             slice = mask[i, :, :]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break
#         for i in range(mask.shape[0]-1, -1, -1):
#             slice = mask[i, :, :]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break
#         for i in range(mask.shape[1]):
#             slice = mask[:, i, :]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break
#         for i in range(mask.shape[1]-1, -1, -1):
#             slice = mask[:, i, :]
#             length = len(slice[slice > 0])
#             if length > 0:
#                 print(organ)
#                 print('num voxels: ', length)
#                 break

# test()
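For reference, the completeness heuristic in getCalcVolumeState can be restated in a few standalone lines (an illustrative sketch, not code from the repo): an organ counts as incomplete when any face of the volume carries at least VOXEL_THRESHOLD foreground voxels, i.e. the mask appears to be cut off by the scan bounds.

import numpy as np

VOXEL_THRESHOLD = 100

def touches_boundary(mask):
    faces = [mask[:, :, 0], mask[:, :, -1],
             mask[0, :, :], mask[-1, :, :],
             mask[:, 0, :], mask[:, -1, :]]
    return any((face > 0).sum() >= VOXEL_THRESHOLD for face in faces)

mask = np.zeros((64, 64, 64))
mask[20:40, 20:40, 0:10] = 1  # organ clipped at the first axial slice
print(touches_boundary(mask))  # True -> reported as "incomplete"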
index-Bv-pE24x.js
ADDED
The diff for this file is too large to render.
migrations/script.py.mako
ADDED
@@ -0,0 +1,26 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    ${downgrades if downgrades else "pass"}
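For context, Alembic renders the Mako template above into concrete migration modules. A hypothetical rendered result (revision id and columns invented for illustration) would look roughly like this:

"""add application_session table

Revision ID: 9f3c2ab81d10
Revises:
Create Date: 2025-01-01 00:00:00
"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

revision: str = '9f3c2ab81d10'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.create_table(
        'application_session',
        sa.Column('session_id', sa.String(), primary_key=True),
        sa.Column('main_nifti_path', sa.String(), nullable=False),
    )


def downgrade() -> None:
    op.drop_table('application_session')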
models/__init__.py
ADDED
File without changes

models/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (171 Bytes)

models/__pycache__/application_session.cpython-312.pyc
ADDED
Binary file (1.72 kB)

models/__pycache__/base.cpython-312.pyc
ADDED
Binary file (503 Bytes)

models/__pycache__/combined_labels.cpython-312.pyc
ADDED
Binary file (1.45 kB)
models/application_session.py
ADDED
@@ -0,0 +1,24 @@
from models.base import db
from sqlalchemy.orm import Mapped, mapped_column
from sqlalchemy import Integer, String, DateTime

class ApplicationSession(db.Model):
    __tablename__ = "application_session"

    session_id: Mapped[str] = mapped_column(primary_key=True, unique=True, type_=String)
    main_nifti_path: Mapped[str] = mapped_column(type_=String, nullable=False)
    combined_labels_id: Mapped[str] = mapped_column(unique=True, type_=String)
    session_created: Mapped[DateTime] = mapped_column(type_=DateTime)
    session_expire_date: Mapped[DateTime] = mapped_column(type_=DateTime)

    def __str__(self):
        return f'''
        ApplicationSession OBJECT:
        session_id: {self.session_id}
        main_nifti_path: {self.main_nifti_path}
        combined_labels_id: {self.combined_labels_id}
        session_created: {self.session_created}
        session_expires: {self.session_expire_date}'''
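A quick illustrative instantiation of the model (values invented; persisting it requires the Flask-SQLAlchemy setup from models/base.py):

from datetime import datetime, timedelta
from models.application_session import ApplicationSession

row = ApplicationSession(
    session_id="demo-session",  # hypothetical values throughout
    main_nifti_path="sessions/demo-session/ct.nii.gz",
    combined_labels_id="demo-clabel-id",
    session_created=datetime.now(),
    session_expire_date=datetime.now() + timedelta(days=3),
)
print(row)  # uses the __str__ defined above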
models/base.py
ADDED
@@ -0,0 +1,7 @@
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import DeclarativeBase

class ModelBase(DeclarativeBase):
    pass

db = SQLAlchemy(model_class=ModelBase)
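A minimal sketch of the wiring this module assumes (the actual app factory lives in app.py, which is not reproduced here; the database URI is a placeholder):

from flask import Flask
from models.base import db

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///dev.db"  # placeholder URI
db.init_app(app)

with app.app_context():
    db.create_all()  # creates tables for all models registered on db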
models/combined_labels.py
ADDED
@@ -0,0 +1,22 @@
from models.base import ModelBase
from sqlalchemy.orm import Mapped, mapped_column
from sqlalchemy import JSON, String, Integer

class CombinedLabels(ModelBase):

    __tablename__ = "combined_labels"

    combined_labels_id: Mapped[str] = mapped_column(primary_key=True, type_=String)
    combined_labels_path: Mapped[str] = mapped_column(type_=String)
    organ_intensities: Mapped[JSON] = mapped_column(type_=JSON)
    organ_metadata: Mapped[JSON] = mapped_column(type_=JSON)

    def __str__(self):
        return f"""
        CombinedLabels object
        id: {self.combined_labels_id}
        path: {self.combined_labels_path}
        organ_intensities: {self.organ_intensities}
        organ_metadata: {self.organ_metadata}
        """
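Illustrative use of the two JSON columns (values invented): organ_intensities maps organ names to label values in the combined mask, while organ_metadata can carry per-organ metrics.

clabel = CombinedLabels(
    combined_labels_id="demo-clabel-id",  # hypothetical values
    combined_labels_path="sessions/demo/combined_labels.nii.gz",
    organ_intensities={"aorta": 1, "liver": 2},
    organ_metadata={"aorta": {"mean_hu": 30.5, "volume_cm3": 25.0}},
)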
requirements.txt
ADDED
@@ -0,0 +1,35 @@
blinker==1.8.2
certifi==2024.8.30
charset-normalizer==3.4.0
click==8.1.7
colorama==0.4.6
dnspython==2.7.0
Flask==3.0.3
Flask-Cors==4.0.1
Flask-SQLAlchemy==3.1.1
greenlet==3.1.1
gunicorn==23.0.0
idna==3.10
iniconfig==2.0.0
itsdangerous==2.2.0
Jinja2==3.1.4
Mako==1.3.8
nibabel==5.2.1
numpy==2.0.1
packaging==24.1
pluggy==1.5.0
psycopg2-binary==2.9.10
pymongo==4.10.1
pytest==8.3.3
python-dotenv==1.0.1
requests==2.32.3
reportlab==4.4.1
scipy==1.13.0
setuptools==75.6.0
SQLAlchemy==2.0.37
typing_extensions==4.12.2
tzlocal==5.2
urllib3==2.2.3
Werkzeug==3.0.3
wheel==0.45.1
#flupg
services/__init__.py
ADDED
File without changes

services/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (173 Bytes)

services/__pycache__/auto_segmentor.cpython-312.pyc
ADDED
Binary file (5.15 kB)

services/__pycache__/nifti_processor.cpython-312.pyc
ADDED
Binary file (10.2 kB)

services/__pycache__/npz_processor.cpython-312.pyc
ADDED
Binary file (12 kB)

services/__pycache__/session_manager.cpython-312.pyc
ADDED
Binary file (8.55 kB)
services/auto_segmentor.py
ADDED
@@ -0,0 +1,95 @@
import os
import uuid
import subprocess
import re
import shutil
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

def get_least_used_gpu(default_gpu=None):
    if default_gpu is None:
        try:
            available_gpus_str = os.getenv("AVAILABLE_GPUS", "")
            available_gpus = [int(x) for x in available_gpus_str.split(",") if x.strip().isdigit()]
            if not available_gpus:
                raise ValueError("No available GPUs specified.")

            result = subprocess.check_output(
                ["nvidia-smi", "--query-gpu=memory.used", "--format=csv,noheader,nounits"],
                universal_newlines=True
            )
            mem_usages = [int(x) for x in result.strip().split("\n")]
            least_used_gpu = min(available_gpus, key=lambda i: mem_usages[i])
            return str(least_used_gpu)
        except Exception as e:
            print("⚠️ Failed to get GPU info, defaulting to 0:", e)
            return "0"
    else:
        return str(default_gpu)


def run_auto_segmentation(input_path, session_dir, model):
    """
    Run the auto-segmentation model with Apptainer inside the given session directory.
    """
    subfolder_name = "ct"

    input_case_dir = os.path.join(session_dir, "inputs")
    outputs_root = os.path.join(session_dir, "outputs")
    input_case_ct_dir = os.path.join(input_case_dir, subfolder_name)
    os.makedirs(input_case_ct_dir, exist_ok=True)
    os.makedirs(outputs_root, exist_ok=True)

    input_filename = os.path.basename(input_path)
    container_input_path = os.path.join(input_case_ct_dir, input_filename)
    shutil.copy(input_path, container_input_path)  # avoids shell interpolation, unlike os.system("cp ...")

    conda_activate_cmd = ""

    conda_path = os.getenv("CONDA_ACTIVATE_PATH", "/opt/anaconda3/etc/profile.d/conda.sh")
    epai_env_name = os.getenv("CONDA_ENV_EPAI", "epai")
    suprem_sandbox_path = os.getenv("SUPREM_SANDBOX_PATH", "")
    epai_script_path = os.getenv("EPAI_SCRIPT_PATH", "")

    if model == 'SuPreM':
        container_path = suprem_sandbox_path
        print(input_case_dir, outputs_root)

        apptainer_cmd = [
            "apptainer", "run", "--nv",
            "-B", f"{input_case_dir}:/workspace/inputs",
            "-B", f"{outputs_root}:/workspace/outputs",
            container_path
        ]
    elif model == 'ePAI':
        conda_activate_cmd = f"source {conda_path} && conda activate {epai_env_name} &&"
        apptainer_cmd = ["bash", epai_script_path, session_dir]
    else:
        print(f"[ERROR] Unknown model: {model}")
        return None

    # Prefix the command with CUDA_VISIBLE_DEVICES so the container only sees
    # the least-loaded GPU; the joined string is executed through bash below.
    selected_gpu = get_least_used_gpu()
    apptainer_cmd = ["CUDA_VISIBLE_DEVICES=" + selected_gpu] + apptainer_cmd
    print(apptainer_cmd)
    try:
        print(f"[INFO] Running {model} auto segmentation for file: {input_filename}")
        full_cmd = f"{conda_activate_cmd} {' '.join(apptainer_cmd)}"
        subprocess.run(full_cmd, shell=True, executable="/bin/bash", check=True)
    except subprocess.CalledProcessError as e:
        print(f"[ERROR] {model} inference failed:", e)
        return None

    if model == 'SuPreM':
        output_path = os.path.join(outputs_root, subfolder_name, "segmentations")
        if not os.path.exists(output_path):
            print("[ERROR] Output mask not found at:", output_path)
            return None
    elif model == 'ePAI':
        output_path = os.path.join(outputs_root, subfolder_name, "combined_labels.nii.gz")
        if not os.path.exists(output_path):
            print("[ERROR] Output mask not found at:", output_path)
            return None
        output_path = os.path.join(outputs_root, subfolder_name)

    return output_path
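A hypothetical invocation (paths invented); the function returns the directory or file produced by the container, or None on any failure:

from services.auto_segmentor import run_auto_segmentation

out = run_auto_segmentation(
    input_path="/data/uploads/ct.nii.gz",  # hypothetical paths
    session_dir="/data/sessions/abc123",
    model="SuPreM",
)
if out is None:
    print("segmentation failed")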
services/nifti_processor.py
ADDED
@@ -0,0 +1,214 @@
import nibabel as nib
import numpy as np
from constants import Constants
from werkzeug.datastructures import MultiDict
import scipy.ndimage as ndimage
import os
import tempfile
from scipy.ndimage import label


def has_large_connected_component(slice_mask, threshold=8):
    """
    Check whether a 2D mask contains a connected component larger than `threshold` voxels.
    """
    labeled, num_features = label(slice_mask)
    sizes = np.bincount(labeled.ravel())
    sizes[0] = 0  # ignore background
    return np.any(sizes > threshold)


class NiftiProcessor:
    def __init__(self, main_nifti_path, clabel_path, organ_intensities=None):
        self._main_nifti_path = main_nifti_path
        self._clabel_path = clabel_path
        self.number_max = 999999
        self._organ_intensities = organ_intensities

    def set_organ_intensities(self, organ_intensities):
        self._organ_intensities = organ_intensities

    @classmethod
    def from_clabel_path(cls, clabel_path):
        return cls(None, clabel_path)

    def calculate_metrics(self):
        """
        Calculate volume and mean HU for each organ based on the segmentation.
        """
        if self._organ_intensities is None or self._clabel_path is None or self._main_nifti_path is None:
            raise Exception("Cannot calculate metrics if self._organ_intensities, self._clabel_path, or self._main_nifti_path is None.")

        data = {"organ_metrics": []}

        clabel_obj = nib.load(self._clabel_path)
        main_nifti_obj = nib.load(self._main_nifti_path)

        clabel_array = np.around(clabel_obj.get_fdata())
        clabel_header = clabel_obj.header
        main_nifti_array = main_nifti_obj.get_fdata()

        intensities, frequencies = np.unique(clabel_array, return_counts=True)
        int_freq = {round(intensities[i]): int(frequencies[i]) for i in range(len(intensities))}

        voxel_dims_mm = clabel_header.get_zooms()
        voxel_volume_cm3 = np.prod(voxel_dims_mm) / 1000  # convert mm³ to cm³
        for organ, label_val in self._organ_intensities.items():
            binary_mask = (clabel_array == label_val)
            slice_0 = binary_mask[:, :, 0]
            slice_last = binary_mask[:, :, -1]

            # Organ touches the first or last slice: treat it as cut off and
            # report sentinel values instead of misleading metrics.
            if has_large_connected_component(slice_0, 8) or has_large_connected_component(slice_last, 8):
                data["organ_metrics"].append({
                    "organ_name": organ,
                    "volume_cm3": self.number_max,
                    "mean_hu": self.number_max
                })
                continue
            if label_val in int_freq:
                volume_cm3 = round(float(int_freq[label_val] * voxel_volume_cm3), Constants.DECIMAL_PRECISION_VOLUME)
            else:
                volume_cm3 = 0
            mean_hu = self.calculate_mean_hu_with_erosion(binary_mask, main_nifti_array)

            data["organ_metrics"].append({
                "organ_name": organ,
                "volume_cm3": volume_cm3,
                "mean_hu": mean_hu
            })

        return data

    def calculate_mean_hu_with_erosion(self, binary_mask, ct_array):
        """
        Calculate mean HU using erosion to avoid edge noise.
        """
        erosion_array = ndimage.binary_erosion(binary_mask, structure=Constants.STRUCTURING_ELEMENT)
        hu_values = ct_array[erosion_array > 0]

        if hu_values.size == 0:
            hu_values = ct_array[binary_mask > 0]

        if hu_values.size == 0:
            return 0

        return round(float(np.mean(hu_values)), Constants.DECIMAL_PRECISION_HU)

    def combine_labels(self, filenames: list[str], nifti_multi_dict: MultiDict, save=True):
        """
        Merge multiple label masks into one combined segmentation and re-index the labels.
        """
        organ_intensities = {}

        if len(filenames) == 1:
            filename = filenames[0]
            segmentation = nifti_multi_dict[filename]
            data = segmentation.read()

            with tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=False) as temp:
                temp.write(data)
                temp.flush()
                temp_path = temp.name

            combined_labels = nib.load(temp_path)

            combined_labels_img_data = combined_labels.get_fdata().astype(np.uint8)

            unique_labels = sorted([v for v in np.unique(combined_labels_img_data) if v != 0])
            original_to_new = {}

            for new_label, original_label in enumerate(unique_labels, start=1):
                original_to_new[int(original_label)] = new_label
                combined_labels_img_data[combined_labels_img_data == original_label] = new_label

            for original_label, new_label in original_to_new.items():
                organ_name = Constants.PREDEFINED_LABELS.get(original_label, f"label_{original_label}")
                organ_intensities[organ_name] = new_label

            combined_labels_header = combined_labels.header
            combined_labels_affine = combined_labels.affine

            combined_labels = nib.Nifti1Image(
                combined_labels_img_data,
                affine=combined_labels_affine,
                header=combined_labels_header
            )

        else:
            combined_labels_img_data = None
            combined_labels_header = None
            combined_labels_affine = None

            for i in range(len(filenames)):
                filename = filenames[i]
                segmentation = nifti_multi_dict[filename]
                data = segmentation.read()

                with tempfile.NamedTemporaryFile(suffix='.nii.gz', delete=True) as temp:
                    temp.write(data)
                    temp.flush()  # make sure all bytes hit disk before loading
                    nifti_obj = nib.load(temp.name)

                    if combined_labels_header is None:
                        combined_labels_header = nifti_obj.header

                    if combined_labels_img_data is None:
                        # start from zeros; np.ndarray() would leave uninitialized memory
                        combined_labels_img_data = np.zeros(shape=nifti_obj.shape, dtype=np.float64)

                    if combined_labels_affine is None:
                        combined_labels_affine = nifti_obj.affine

                    img_data = nifti_obj.get_fdata()

                scaled = img_data * np.float64(i + 1)
                combined_labels_img_data = np.maximum(combined_labels_img_data, scaled)

                organ_intensities[filename] = i + 1

            combined_labels = nib.nifti1.Nifti1Image(
                dataobj=combined_labels_img_data,
                affine=combined_labels_affine,
                header=combined_labels_header
            )

        if save:
            nib.save(combined_labels, self._clabel_path)

        return combined_labels, organ_intensities

    def __str__(self):
        return f"NiftiProcessor Object\n main_nifti_path: {self._main_nifti_path}\n clabel_path: {self._clabel_path}"

    def calculate_pdac_sma_staging(self):
        """
        Determine staging of pancreatic cancer based on the SMA contact ratio.
        """
        if self._clabel_path is None:
            raise Exception("clabel path is not set.")

        clabel_obj = nib.load(self._clabel_path)
        clabel_data = np.around(clabel_obj.get_fdata()).astype(np.uint8)

        PDAC_LABEL = 20  # pancreatic_pdac (note: PREDEFINED_LABELS maps 20 to "pancreatic_duct")
        SMA_LABEL = 26   # superior_mesenteric_artery

        pdac_mask = (clabel_data == PDAC_LABEL)
        sma_mask = (clabel_data == SMA_LABEL)

        if np.sum(pdac_mask) == 0:
            return "Stage T1 (No PDAC tumor present)"
        if np.sum(sma_mask) == 0:
            return "Unknown (SMA not found)"

        pdac_dilated = ndimage.binary_dilation(pdac_mask, structure=Constants.STRUCTURING_ELEMENT)
        contact_voxels = pdac_dilated & sma_mask
        contact_ratio = np.sum(contact_voxels) / np.sum(sma_mask)

        if contact_ratio > 0.7:
            return "Stage T4 (SMA encasement > 180°)"
        elif contact_ratio > 0.3:
            return "Stage T3 (SMA encasement ~90°–180°)"
        elif contact_ratio > 0:
            return "Stage T2 (SMA contact < 90°)"
        else:
            return "Stage T1 (No SMA contact)"
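A worked toy example of the contact-ratio heuristic behind calculate_pdac_sma_staging (synthetic masks, not clinical data): the tumor mask is dilated by the same 5x5x5 cube, and the staging thresholds apply to the fraction of SMA voxels that the dilated tumor touches.

import numpy as np
import scipy.ndimage as ndimage

struct = np.ones((5, 5, 5), dtype=bool)
sma = np.zeros((30, 30, 30), dtype=bool)
sma[10:20, 14:16, 14:16] = True   # small vessel-like tube (40 voxels)
pdac = np.zeros_like(sma)
pdac[10:20, 10:14, 10:16] = True  # tumor directly adjacent to the vessel

dilated = ndimage.binary_dilation(pdac, structure=struct)
contact_ratio = (dilated & sma).sum() / sma.sum()
print(contact_ratio)  # 1.0 here, i.e. > 0.7 -> "Stage T4 (SMA encasement > 180°)"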
services/npz_processor.py
ADDED
@@ -0,0 +1,229 @@
import nibabel as nib
import numpy as np
from constants import Constants
from werkzeug.datastructures import MultiDict
import scipy.ndimage as ndimage
import os
import tempfile
from scipy.ndimage import label
import pathlib
import json

def get_panTS_id(index):
    # Zero-pad the numeric index to 8 digits (equivalent to str(index).zfill(8)).
    cur_case_id = str(index)
    iter = max(0, 8 - len(str(index)))
    for _ in range(iter):
        cur_case_id = "0" + cur_case_id
    cur_case_id = "PanTS_" + cur_case_id
    return cur_case_id

def has_large_connected_component(slice_mask, threshold=8):
    """
    Check whether a 2D mask contains a connected component larger than `threshold` voxels.
    """
    labeled, num_features = label(slice_mask)
    sizes = np.bincount(labeled.ravel())
    sizes[0] = 0  # ignore background
    return np.any(sizes > threshold)


class NpzProcessor:
    def __init__(self, main_npz_path=None, clabel_path=None, organ_intensities=None):
        self._main_nifti_path = main_npz_path
        self._clabel_path = clabel_path
        self.number_max = 999999
        self._organ_intensities = organ_intensities

    def set_organ_intensities(self, organ_intensities):
        self._organ_intensities = organ_intensities

    @classmethod
    def from_clabel_path(cls, clabel_path):
        return cls(None, clabel_path)

    # not used
    def calculate_mean_hu_with_erosion(self, binary_mask, ct_array):
        """
        Calculate mean HU using erosion to avoid edge noise.
        """
        erosion_array = ndimage.binary_erosion(binary_mask, structure=Constants.STRUCTURING_ELEMENT)
        hu_values = ct_array[erosion_array > 0]

        if hu_values.size == 0:
            hu_values = ct_array[binary_mask > 0]

        if hu_values.size == 0:
            return 0

        return round(float(np.mean(hu_values)), Constants.DECIMAL_PRECISION_HU)


    def npz_to_nifti(self, id: int, combined_label=True, save=True, path=None):
        subfolder = "LabelTr" if id < 9000 else "LabelTe"
        image_subfolder = "ImageTe" if id >= 9000 else "ImageTr"

        if combined_label and path is None:
            dir_path = pathlib.Path(f"{Constants.PANTS_PATH}/data/{subfolder}/{get_panTS_id(id)}/{Constants.COMBINED_LABELS_FILENAME}")
        else:
            dir_path = pathlib.Path(path)

        nifti_path = pathlib.Path(f"{Constants.PANTS_PATH}/data/{image_subfolder}/{get_panTS_id(id)}/{Constants.MAIN_NIFTI_FILENAME}")
        nifti_dat = nib.load(nifti_path)

        arr = np.load(dir_path)["data"].astype(np.float32)
        img = nib.nifti1.Nifti1Image(arr, affine=nifti_dat.affine, header=nifti_dat.header)

        nib.save(img, dir_path.with_suffix(".nii.gz"))


    def combine_labels(self, id: int, keywords={"pancrea": "pancreas"}, save=True):
        """
        Merge multiple label masks into one combined segmentation and re-index the labels.
        """
        organ_intensities = {}
        segment_subfolder = "LabelTr"
        if id >= 9000:
            segment_subfolder = "LabelTe"

        image_subfolder = "ImageTe" if id >= 9000 else "ImageTr"

        nifti_path = pathlib.Path(f"{Constants.PANTS_PATH}/data/{image_subfolder}/{get_panTS_id(id)}/{Constants.MAIN_NIFTI_FILENAME}")
        nifti_dat = nib.load(nifti_path)

        dir_path = pathlib.Path(f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/segmentations")
        npz_files = list(dir_path.glob("*.npz"))

        combined_labels_img_data = None
        keyword_dict = {organ: None for organ in keywords.values()}

        for i, file in enumerate(npz_files):
            filename = file.name
            data = np.load(file)["data"]

            if combined_labels_img_data is None:
                # start from zeros; np.ndarray() would leave uninitialized memory
                combined_labels_img_data = np.zeros(shape=data.shape, dtype=np.float64)

            matched = False
            for substring, organ in keywords.items():
                if substring in filename:
                    if keyword_dict[organ] is None:
                        keyword_dict[organ] = np.zeros(shape=data.shape, dtype=np.float64)
                    scaled = data * np.float64(i + 1)
                    keyword_dict[organ] = np.maximum(keyword_dict[organ], scaled)
                    combined_labels_img_data = np.maximum(combined_labels_img_data, scaled)
                    organ_intensities[organ] = i + 1
                    matched = True
                    break

            if not matched:  # no keyword match, still add to combined
                scaled = data * np.float64(i + 1)
                combined_labels_img_data = np.maximum(combined_labels_img_data, scaled)
                organ_intensities[filename] = i + 1

        if save:
            # save each organ-specific file
            for organ, data in keyword_dict.items():
                if data is not None:
                    save_path = (
                        f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/segmentations/{organ}.nii.gz"
                    )
                    img = nib.Nifti1Image(data, affine=nifti_dat.affine, header=nifti_dat.header)
                    nib.save(img, save_path)

            # save combined labels
            if combined_labels_img_data is not None:
                save_path = (
                    f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/{Constants.COMBINED_LABELS_FILENAME}"
                )
                np.savez_compressed(save_path, data=combined_labels_img_data)

            # save organ intensities
            organ_save_path = (
                f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/{Constants.ORGAN_INTENSITIES_FILENAME}"
            )
            with open(organ_save_path, "w") as f:
                json.dump(organ_intensities, f)

        return combined_labels_img_data, organ_intensities


    def nifti_combine_labels(self, id: int, keywords: dict[str, str] = {"pancrea": "pancreas"}, save=True):
        """
        Merge multiple NIfTI label masks into one combined segmentation and re-index the labels.
        """

        organ_intensities = {}
        segment_subfolder = "LabelTr" if id < 9000 else "LabelTe"
        image_subfolder = "ImageTr" if id < 9000 else "ImageTe"

        # load main reference image (for affine/header)
        nifti_path = pathlib.Path(
            f"{Constants.PANTS_PATH}/data/{image_subfolder}/{get_panTS_id(id)}/{Constants.MAIN_NIFTI_FILENAME}"
        )
        base_nifti = nib.load(nifti_path)

        # folder containing NIfTI segmentations
        dir_path = pathlib.Path(
            f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/segmentations"
        )
        nii_files = list(dir_path.glob("*.nii*"))

        if not nii_files:
            raise FileNotFoundError(f"No NIfTI label files found in {dir_path}")

        combined_labels = None
        keyword_dict = {organ: None for organ in keywords.values()}

        for i, file in enumerate(sorted(nii_files)):
            filename = file.name
            nii = nib.load(file)
            data = nii.get_fdata()

            if combined_labels is None:
                combined_labels = np.zeros_like(data, dtype=np.float64)

            matched = False
            for substring, organ in keywords.items():
                if substring.lower() in filename.lower():
                    if keyword_dict[organ] is None:
                        keyword_dict[organ] = np.zeros_like(data, dtype=np.float64)
                    scaled = data * float(i + 1)
                    keyword_dict[organ] = np.maximum(keyword_dict[organ], scaled)
                    combined_labels = np.maximum(combined_labels, scaled)
                    organ_intensities[organ] = i + 1
                    matched = True
                    break

            if not matched:
                scaled = data * float(i + 1)
                combined_labels = np.maximum(combined_labels, scaled)
                organ_intensities[filename] = i + 1

        if save:
            # save each organ-specific mask
            for organ, data in keyword_dict.items():
                if data is not None:
                    save_path = (
                        f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/segmentations/{organ}.nii.gz"
                    )
                    img = nib.Nifti1Image(data, affine=base_nifti.affine, header=base_nifti.header)
                    nib.save(img, save_path)

            # save combined mask as NIfTI
            if combined_labels is not None:
                save_path = (
                    f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/{Constants.COMBINED_LABELS_NIFTI_FILENAME}"
                )
                img = nib.Nifti1Image(combined_labels, affine=base_nifti.affine, header=base_nifti.header)
                nib.save(img, save_path)

            # save organ intensity mapping
            organ_save_path = (
                f"{Constants.PANTS_PATH}/data/{segment_subfolder}/{get_panTS_id(id)}/{Constants.ORGAN_INTENSITIES_FILENAME}"
            )
            with open(organ_save_path, "w") as f:
                json.dump(organ_intensities, f, indent=2)

        return combined_labels, organ_intensities
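The PanTS case-id helper simply zero-pads the numeric index to eight digits; for example:

print(get_panTS_id(45))             # PanTS_00000045
print("PanTS_" + str(45).zfill(8))  # equivalent one-liner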
services/session_manager.py
ADDED
@@ -0,0 +1,176 @@
from models.base import db
from models.application_session import ApplicationSession
from models.combined_labels import CombinedLabels
from datetime import datetime, timedelta
from constants import Constants
import uuid
import shutil
import os


class SessionManager(object):
    _instance = None

    def __init__(self):
        self.active_sessions = {}  # session_id -> ApplicationSession mapping table

    @staticmethod
    def generate_uuid():
        # Static method so callers (including the unit tests) can invoke
        # SessionManager.instance().generate_uuid().
        return str(uuid.uuid4())

    @classmethod
    def instance(cls):
        if cls._instance is None:
            print("Creating SessionManager Instance")
            cls._instance = cls.__new__(cls)
            cls._instance.__init__()  # initialize manually
        return cls._instance

    def get_session(self, session_id):
        """Get an ApplicationSession instance by session_id."""
        if session_id in self.active_sessions:
            return self.active_sessions[session_id]

        # Fall back to the database.
        stmt = db.select(ApplicationSession).where(ApplicationSession.session_id == session_id)
        resp = db.session.execute(stmt)
        session = resp.scalar()

        if session is not None:
            self.active_sessions[session_id] = session  # cache it
        return session

    def register_session(self, session_id):
        """
        Register only the session_id for now; the ApplicationSession itself is
        created later, once the remaining information becomes available.
        """
        self.active_sessions[session_id] = {
            "registered": True,
            "created_at": datetime.now()
        }
        print(f"[SessionManager] Registered new session_id (lazy mode): {session_id}")


    def validate_session(self, session_id):
        pass

    def validate_clabel(self, clabel_id):
        pass

    def terminate_session(self, session_id):
        stmt = db.select(ApplicationSession).where(ApplicationSession.session_id == session_id)
        resp = db.session.execute(stmt)
        app_session = resp.scalar()
        combined_labels_id = app_session.combined_labels_id

        stmt = db.select(CombinedLabels).where(CombinedLabels.combined_labels_id == combined_labels_id)
        resp = db.session.execute(stmt)
        combined_labels = resp.scalar()

        db.session.delete(app_session)
        db.session.delete(combined_labels)
        db.session.commit()

        try:
            print(f'removing session: {session_id}')
            shutil.rmtree(os.path.join(Constants.SESSIONS_DIR_NAME, session_id))
            return True
        except OSError:
            return False

    def get_expired(self):  # can only be used within an app context
        print("sched check")
        current_time = datetime.now()
        stmt = db.select(ApplicationSession).where(ApplicationSession.session_expire_date <= current_time)
        resp = db.session.execute(stmt)
        return resp.scalars().all()

    def update_session_info(self, session_id, main_nifti_path=None, combined_labels_id=None):
        """
        Update session information; if no row exists in the database yet,
        create a new ApplicationSession.
        """
        # Try the database first.
        stmt = db.select(ApplicationSession).where(ApplicationSession.session_id == session_id)
        resp = db.session.execute(stmt)
        session = resp.scalar()

        if session is None:
            # Not in the database yet, so a new ApplicationSession must be created.
            if main_nifti_path is None:
                raise ValueError(f"Cannot create ApplicationSession for {session_id} without main_nifti_path!")

            created_at = datetime.now()
            expire_at = created_at + timedelta(days=Constants.SESSION_TIMEDELTA)

            session = ApplicationSession(
                session_id=session_id,
                main_nifti_path=main_nifti_path,
                combined_labels_id=combined_labels_id,
                session_created=created_at,
                session_expire_date=expire_at
            )

            db.session.add(session)
            print(f"[SessionManager] Created new ApplicationSession during update: {session_id}")
        else:
            # Row exists; update its fields directly.
            if main_nifti_path is not None:
                session.main_nifti_path = main_nifti_path
            if combined_labels_id is not None:
                session.combined_labels_id = combined_labels_id
            print(f"[SessionManager] Updated existing ApplicationSession: {session_id}")

        db.session.commit()
        self.active_sessions[session_id] = session
        return session

    def bind_combined_labels_to_session(self, session_id, clabel_path, organ_intensities=None):
        """
        Create a CombinedLabels record from an existing combined_labels_id.
        The ApplicationSession must already have a combined_labels_id set.
        """
        # Fetch the session.
        session = self.get_session(session_id)
        if session is None:
            raise ValueError(f"Session {session_id} not found.")

        # Check whether the session already has a combined_labels_id.
        combined_labels_id = session.combined_labels_id
        if combined_labels_id is None:
            raise ValueError(f"Session {session_id} does not have a combined_labels_id set yet.")

        # Create the CombinedLabels row with the existing combined_labels_id.
        new_clabel = CombinedLabels(
            combined_labels_id=combined_labels_id,
            combined_labels_path=clabel_path,
            organ_intensities=organ_intensities or {},
            organ_metadata={}
        )

        # Persist to the database.
        db.session.add(new_clabel)
        db.session.commit()

        print(f"[SessionManager] Bound existing CombinedLabels ID {combined_labels_id} to session {session_id}")
        return new_clabel


    def deprecated_register_session(self, session_id, expire_minutes=60 * 24 * 3):
        """Create an ApplicationSession from an existing session_id and register it in the database and cache."""
        created_at = datetime.now()
        expire_at = created_at + timedelta(minutes=expire_minutes)
        combined_labels_id = None
        main_nifti_path = None
        new_session = ApplicationSession(
            session_id=session_id,
            main_nifti_path=main_nifti_path,
            combined_labels_id=combined_labels_id,
            session_created=created_at,
            session_expire_date=expire_at,
        )

        db.session.add(new_session)
        db.session.commit()

        self.active_sessions[session_id] = new_session

        print(f"[SessionManager] Registered new session: {session_id}")
        return new_session
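Sketch of the intended singleton usage (session id and path invented; an application context is required because the manager reads and writes the database):

from app import create_app
from services.session_manager import SessionManager

app = create_app()
with app.app_context():
    sm = SessionManager.instance()
    sm.update_session_info(
        "session-123",  # hypothetical id
        main_nifti_path="sessions/session-123/ct.nii.gz",
    )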
tests/__init__.py
ADDED
File without changes

tests/functional/__init__.py
ADDED
File without changes

tests/unit/__init__.py
ADDED
File without changes
tests/unit/test_app_session_model.py
ADDED
@@ -0,0 +1,132 @@
import unittest
from models.application_session import ApplicationSession
from models.base import db
from services.session_manager import SessionManager
from app import create_app
from psycopg2.errors import UniqueViolation
from datetime import datetime, timedelta


class TestApplicationSessionModel(unittest.TestCase):

    def setUp(self):
        self.db = db
        self.app = create_app()
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.session_manager = SessionManager.instance()
        self.main_nifti_path = 'test-sessions/test-045'
        self.combined_labels_id = self.session_manager.generate_uuid()

        db.create_all()

    def tearDown(self):
        self.db.session.remove()
        self.app_context.pop()

    def test_insert_into_table(self):
        session_key = self.session_manager.generate_uuid()
        now = datetime.now()
        expire = now + timedelta(days=3)
        new_session = ApplicationSession(
            session_id=session_key,
            main_nifti_path=self.main_nifti_path,
            combined_labels_id=self.combined_labels_id,
            session_created=now,
            session_expire_date=expire,
        )
        db.session.add(new_session)
        db.session.commit()

        stmt = db.select(ApplicationSession).where(ApplicationSession.session_id == session_key)
        res = db.session.execute(stmt)
        app_session = res.scalar()

        self.assertEqual(app_session.session_id, session_key)
        self.assertEqual(app_session.main_nifti_path, self.main_nifti_path)
        self.assertEqual(app_session.combined_labels_id, self.combined_labels_id)
        self.assertEqual(app_session.session_created, now)
        self.assertEqual(app_session.session_expire_date, expire)

        # clean up from the DB
        db.session.delete(app_session)
        db.session.commit()

    def test_duplicate_session_id(self):
        now = datetime.now()
        session_key = self.session_manager.generate_uuid()
        new_session = ApplicationSession(
            session_id=session_key,
            main_nifti_path=self.main_nifti_path,
            combined_labels_id=self.combined_labels_id,
            session_created=now,
            session_expire_date=now + timedelta(days=3)
        )

        db.session.add(new_session)
        db.session.commit()

        now = datetime.now()
        dup_key_session = ApplicationSession(
            session_id=session_key,  # same session_id as new_session
            main_nifti_path="test/path",
            combined_labels_id="unique_combined_labels_id",
            session_created=now,
            session_expire_date=now + timedelta(days=3)
        )

        with self.assertRaises(UniqueViolation):
            try:
                db.session.add(dup_key_session)
                db.session.commit()
            except Exception:
                raise UniqueViolation

        db.session.rollback()
        db.session.delete(new_session)
        db.session.commit()

    def test_duplicate_combined_labels_id(self):
        now = datetime.now()
        session_key = self.session_manager.generate_uuid()
        new_session = ApplicationSession(
            session_id=session_key,
            main_nifti_path=self.main_nifti_path,
            combined_labels_id=self.combined_labels_id,
            session_created=now,
            session_expire_date=now + timedelta(days=3),
        )

        db.session.add(new_session)
        db.session.commit()

        session_key2 = self.session_manager.generate_uuid()

        now = datetime.now()
        session_2 = ApplicationSession(
            session_id=session_key2,  # distinct session_id, so only the duplicate combined_labels_id can violate a constraint
            main_nifti_path=self.main_nifti_path,
            combined_labels_id=self.combined_labels_id,  # same as new_session
            session_created=now,
            session_expire_date=now + timedelta(days=3),
        )

        with self.assertRaises(UniqueViolation):
            try:
                db.session.add(session_2)
                db.session.commit()
            except Exception:
                raise UniqueViolation

        db.session.rollback()
        db.session.delete(new_session)
        db.session.commit()


if __name__ == "__main__":
    unittest.main()
tests/unit/test_combined_labels.py
ADDED
@@ -0,0 +1,77 @@
| 1 |
+
import unittest
|
| 2 |
+
from models.combined_labels import CombinedLabels
|
| 3 |
+
from app import create_app
|
| 4 |
+
from services.session_manager import SessionManager
|
| 5 |
+
from psycopg2.errors import UniqueViolation
|
| 6 |
+
from models.base import db
|
| 7 |
+
|
| 8 |
+
class TestCombinedLabels(unittest.TestCase):
|
| 9 |
+
|
| 10 |
+
def setUp(self):
|
| 11 |
+
self.app = create_app()
|
| 12 |
+
self.app_context = self.app.app_context()
|
| 13 |
+
self.app_context.push()
|
| 14 |
+
self.session_manager = SessionManager.instance()
|
| 15 |
+
db.create_all()
|
| 16 |
+
|
| 17 |
+
self.combined_labels_id = self.session_manager.generate_uuid()
|
| 18 |
+
self.combined_labels_path = "session_id/combined_labels.nii.gz"
|
| 19 |
+
self.organ_intensities = {"aorta": 1}
|
| 20 |
+
self.organ_metadata = {"aorta": {"meanHU": 30, "volume": 25}}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_insert_combined_labels(self):
|
| 24 |
+
combined_labels = CombinedLabels(
|
| 25 |
+
combined_labels_id = self.combined_labels_id,
|
| 26 |
+
combined_labels_path = self.combined_labels_path,
|
| 27 |
+
organ_intensities = self.organ_intensities,
|
| 28 |
+
organ_metadata = self.organ_metadata
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
db.session.add(combined_labels)
|
| 33 |
+
db.session.commit()
|
| 34 |
+
|
| 35 |
+
stmt = db.select(CombinedLabels).where(CombinedLabels.combined_labels_id == self.combined_labels_id)
|
| 36 |
+
resp = db.session.execute(stmt)
|
| 37 |
+
clabel = resp.scalar()
|
| 38 |
+
|
| 39 |
+
self.assertEqual(clabel.combined_labels_id, self.combined_labels_id)
|
| 40 |
+
self.assertEqual(clabel.combined_labels_path, self.combined_labels_path)
|
| 41 |
+
self.assertEqual(clabel.organ_intensities, self.organ_intensities)
|
| 42 |
+
self.assertEqual(clabel.organ_metadata, self.organ_metadata)
|
| 43 |
+
|
| 44 |
+
db.session.delete(clabel)
|
| 45 |
+
db.session.commit()
|
| 46 |
+
|
| 47 |
+
def test_duplicated_unique_id(self):
|
| 48 |
+
combined_labels = CombinedLabels(
|
| 49 |
+
combined_labels_id = self.combined_labels_id,
|
| 50 |
+
combined_labels_path = self.combined_labels_path,
|
| 51 |
+
organ_intensities = self.organ_intensities,
|
| 52 |
+
organ_metadata = self.organ_metadata
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
db.session.add(combined_labels)
|
| 56 |
+
db.session.commit()
|
| 57 |
+
|
| 58 |
+
new_clabel = CombinedLabels(
|
| 59 |
+
combined_labels_id = self.combined_labels_id,
|
| 60 |
+
combined_labels_path = "path",
|
| 61 |
+
organ_intensities = {},
|
| 62 |
+
organ_metadata = {}
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
with self.assertRaises(UniqueViolation):
|
| 66 |
+
try:
|
| 67 |
+
db.session.add(new_clabel)
|
| 68 |
+
db.session.commit()
|
| 69 |
+
except:
|
| 70 |
+
raise UniqueViolation
|
| 71 |
+
|
| 72 |
+
db.session.rollback()
|
| 73 |
+
db.session.delete(combined_labels)
|
| 74 |
+
db.session.commit()
|
| 75 |
+
|
| 76 |
+
if __name__ == "__main__":
|
| 77 |
+
unittest.main()
|
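The dict round-trip asserted above (organ_intensities and organ_metadata come back as plain dicts) implies JSON-typed columns on the model. models/combined_labels.py appears earlier in this upload; as a reminder of the shape the tests rely on, here is a minimal sketch, with column types that are assumptions rather than the shipped definitions:

# Hypothetical sketch of models/combined_labels.py -- column names are
# taken from the tests, column types are assumptions.
from models.base import db

class CombinedLabels(db.Model):
    __tablename__ = "combined_labels"

    # Primary key; the duplicate-insert test relies on its uniqueness.
    combined_labels_id = db.Column(db.String(36), primary_key=True)
    combined_labels_path = db.Column(db.String(255), nullable=False)
    # JSON columns let the tests store and compare plain dicts directly.
    organ_intensities = db.Column(db.JSON, default=dict)
    organ_metadata = db.Column(db.JSON, default=dict)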
tests/unit/test_nifti_combine.py
ADDED
@@ -0,0 +1,156 @@
import unittest
import os
import nibabel as nib
import numpy as np
import io

from app import create_app
from services.nifti_processor import NiftiProcessor
from werkzeug.datastructures import MultiDict, FileStorage


"""
Each test session has

ct.nii.gz
    - main NIfTI / CT scan file

/combined_labels_ANS/combined_labels.nii.gz
    - segmentation files combined into one (the generated file is tested against this one)

/combined_labels.nii.gz
    - segmentation file that NiftiProcessor will create and store here (the file under test)

/segmentations - directory containing 9 NIfTI segmentation files:
    - aorta.nii.gz
    - gall_bladder.nii.gz
    - kidney_left.nii.gz
    - kidney_right.nii.gz
    - liver.nii.gz
    - pancreas.nii.gz
    - postcava.nii.gz
    - spleen.nii.gz
    - stomach.nii.gz

test-045:
    ct.nii.gz ~ 17 MB
    combined_labels ~ 151 KB

test-050:
    ct.nii.gz ~ 13 MB
    combined_labels ~ 116 KB

test-338:
    ct.nii.gz ~ 16 MB
    combined_labels ~ 204 KB
"""


def create_nifti_multi_dict(seg_filenames: list[str], segmentation_path: str):
    # Wrap each on-disk segmentation file in a FileStorage so the tests hand
    # NiftiProcessor the same structure Flask builds from a multipart upload.
    nifti_multi_dict = MultiDict()
    for filename in seg_filenames:
        path = os.path.join(segmentation_path, filename)

        with open(path, 'rb') as file_stream:
            file_storage = FileStorage(stream=io.BytesIO(file_stream.read()), filename=filename, content_type='application/gzip')
            nifti_multi_dict.add(key=filename, value=file_storage)

    return nifti_multi_dict


class TestNiftiProcessorFunctions(unittest.TestCase):

    def setUp(self):
        self.test_case: str = ''
        self.combined_labels_ANS: nib.nifti1.Nifti1Image = None
        self.organ_intensities_ANS: dict = {'aorta.nii.gz': 1, 'spleen.nii.gz': 2,
                                            'pancreas.nii.gz': 3, 'kidney_left.nii.gz': 4,
                                            'postcava.nii.gz': 5, 'gall_bladder.nii.gz': 6,
                                            'liver.nii.gz': 7, 'stomach.nii.gz': 8,
                                            'kidney_right.nii.gz': 9}

    def tearDown(self):
        self.test_case = ''
        self.combined_labels_ANS = None

    def test_case_045(self):
        print('test case 045')
        self.test_case = 'test-045'
        session_path = os.path.join('test-sessions', self.test_case)
        self.combined_labels_ANS = nib.load(os.path.join(session_path, 'combined_labels_ANS', 'combined_labels.nii.gz'))
        segmentation_path = os.path.join(session_path, 'segmentations')
        seg_filenames = os.listdir(segmentation_path)

        nifti_multi_dict = create_nifti_multi_dict(seg_filenames, segmentation_path)

        nifti_processor = NiftiProcessor(main_nifti_path=None, clabel_path=os.path.join(session_path, 'combined_labels.nii.gz'))
        combined_labels, organ_intensities = nifti_processor.combine_labels(seg_filenames, nifti_multi_dict, save=False)

        # Voxel-wise comparison against the reference volume.
        differences = combined_labels.get_fdata() == self.combined_labels_ANS.get_fdata()
        error_voxels = np.count_nonzero(~differences)
        dims = combined_labels.shape
        total_voxels = dims[0] * dims[1] * dims[2]
        error_percent = error_voxels / total_voxels
        print(f'Number of Inaccurate Voxels: {error_voxels} Total Voxels: {total_voxels}')
        print(f'Accuracy: {1 - error_percent}')
        self.assertEqual(combined_labels.header, self.combined_labels_ANS.header)
        self.assertEqual(organ_intensities, self.organ_intensities_ANS)

        print(np.unique(combined_labels.get_fdata()))

    def test_case_050(self):
        print("test_case_050")
        self.test_case = 'test-050'
        session_path = os.path.join('test-sessions', self.test_case)
        self.combined_labels_ANS = nib.load(os.path.join(session_path, 'combined_labels_ANS', 'combined_labels.nii.gz'))
        segmentation_path = os.path.join(session_path, 'segmentations')
        seg_filenames = os.listdir(segmentation_path)

        nifti_multi_dict = create_nifti_multi_dict(seg_filenames, segmentation_path)

        nifti_processor = NiftiProcessor(main_nifti_path=None, clabel_path=os.path.join(session_path, 'combined_labels.nii.gz'))
        combined_labels, organ_intensities = nifti_processor.combine_labels(seg_filenames, nifti_multi_dict, save=False)

        # Voxel-wise comparison against the reference volume.
        differences = combined_labels.get_fdata() == self.combined_labels_ANS.get_fdata()
        error_voxels = np.count_nonzero(~differences)
        dims = combined_labels.shape
        total_voxels = dims[0] * dims[1] * dims[2]
        error_percent = error_voxels / total_voxels
        print(f'Number of Inaccurate Voxels: {error_voxels} Total Voxels: {total_voxels}')
        print(f'Accuracy: {1 - error_percent}')
        self.assertEqual(combined_labels.header, self.combined_labels_ANS.header)
        self.assertEqual(organ_intensities, self.organ_intensities_ANS)

        print(np.unique(combined_labels.get_fdata()))

    def test_case_338(self):
        print("test_case_338")
        self.test_case = 'test-338'
        session_path = os.path.join('test-sessions', self.test_case)
        self.combined_labels_ANS = nib.load(os.path.join(session_path, 'combined_labels_ANS', 'combined_labels.nii.gz'))
        segmentation_path = os.path.join(session_path, 'segmentations')
        seg_filenames = os.listdir(segmentation_path)

        nifti_multi_dict = create_nifti_multi_dict(seg_filenames, segmentation_path)

        nifti_processor = NiftiProcessor(main_nifti_path=None, clabel_path=os.path.join(session_path, 'combined_labels.nii.gz'))
        combined_labels, organ_intensities = nifti_processor.combine_labels(seg_filenames, nifti_multi_dict, save=False)

        # Voxel-wise comparison against the reference volume.
        differences = combined_labels.get_fdata() == self.combined_labels_ANS.get_fdata()
        error_voxels = np.count_nonzero(~differences)
        dims = combined_labels.shape
        total_voxels = dims[0] * dims[1] * dims[2]
        error_percent = error_voxels / total_voxels
        print(f'Number of Inaccurate Voxels: {error_voxels} Total Voxels: {total_voxels}')
        print(f'Accuracy: {1 - error_percent}')
        self.assertEqual(combined_labels.header, self.combined_labels_ANS.header)
        self.assertEqual(organ_intensities, self.organ_intensities_ANS)

        print(np.unique(combined_labels.get_fdata()))


if __name__ == "__main__":
    unittest.main()
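The three test cases repeat the same load/combine/compare body with only the session name changing. A helper along these lines (a sketch reusing the module's own imports and names; _run_combine_case is a hypothetical name) would reduce each test to a single call:

# Sketch: shared body of test_case_045/050/338, as a TestCase method.
# Relies on os, nib, np, create_nifti_multi_dict and NiftiProcessor as
# imported/defined in the module above.
def _run_combine_case(self, case_name: str):
    session_path = os.path.join('test-sessions', case_name)
    answer = nib.load(os.path.join(session_path, 'combined_labels_ANS', 'combined_labels.nii.gz'))
    segmentation_path = os.path.join(session_path, 'segmentations')
    seg_filenames = os.listdir(segmentation_path)

    nifti_multi_dict = create_nifti_multi_dict(seg_filenames, segmentation_path)
    processor = NiftiProcessor(main_nifti_path=None,
                               clabel_path=os.path.join(session_path, 'combined_labels.nii.gz'))
    combined, intensities = processor.combine_labels(seg_filenames, nifti_multi_dict, save=False)

    # Report voxel-level agreement, then assert on header and intensity map.
    mismatches = np.count_nonzero(combined.get_fdata() != answer.get_fdata())
    print(f'{case_name}: {mismatches} mismatched voxels of {np.prod(combined.shape)}')
    self.assertEqual(combined.header, answer.header)
    self.assertEqual(intensities, self.organ_intensities_ANS)

Each case then collapses to, for example, self._run_combine_case('test-045').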
tests/unit/test_nifti_processing.py
ADDED
@@ -0,0 +1,111 @@
import unittest
from services.nifti_processor import NiftiProcessor

"""
Each test session has

ct.nii.gz
    - main NIfTI / CT scan file

/combined_labels_ANS/combined_labels.nii.gz
    - segmentation files combined into one (the generated file is tested against this one)

/combined_labels.nii.gz
    - segmentation file that NiftiProcessor will create and store here (the file under test)

/segmentations - directory containing 9 NIfTI segmentation files:
    - aorta.nii.gz
    - gall_bladder.nii.gz
    - kidney_left.nii.gz
    - kidney_right.nii.gz
    - liver.nii.gz
    - pancreas.nii.gz
    - postcava.nii.gz
    - spleen.nii.gz
    - stomach.nii.gz

test-045:
    ct.nii.gz ~ 17 MB
    combined_labels ~ 151 KB

test-050:
    ct.nii.gz ~ 13 MB
    combined_labels ~ 116 KB

test-338:
    ct.nii.gz ~ 16 MB
    combined_labels ~ 204 KB


organ_intensities = {
    aorta: 1,
    gall_bladder: 2,
    kidney_left: 3,
    kidney_right: 4,
    liver: 5,
    pancreas: 6,
    postcava: 7,
    spleen: 8,
    stomach: 9
}
"""


class TestNiftiProcessing(unittest.TestCase):

    def setUp(self):
        self.clabel_path = None       # set per test case
        self.main_nifti_path = None   # set per test case

        self.organ_intensities = {"aorta": 1,
                                  "gall_bladder": 2,
                                  "kidney_left": 3,
                                  "kidney_right": 4,
                                  "liver": 5,
                                  "pancreas": 6,
                                  "postcava": 7,
                                  "spleen": 8,
                                  "stomach": 9}

    def test_case_045(self):
        print("testcase 045")
        self.clabel_path = "./test-sessions/test-045/combined_labels_ANS/combined_labels.nii.gz"
        self.main_nifti_path = "./test-sessions/test-045/ct.nii.gz"
        nifti_processor = NiftiProcessor(self.main_nifti_path, self.clabel_path)
        nifti_processor.set_organ_intensities(self.organ_intensities)
        # print(nifti_processor._organ_intensities)
        organ_metrics = nifti_processor.calculate_metrics()

        self.assertIsNotNone(organ_metrics)

    def test_case_050(self):
        print("testcase 050")
        self.clabel_path = "./test-sessions/test-050/combined_labels_ANS/combined_labels.nii.gz"
        self.main_nifti_path = "./test-sessions/test-050/ct.nii.gz"

        nifti_processor = NiftiProcessor(self.main_nifti_path, self.clabel_path)
        nifti_processor.set_organ_intensities(self.organ_intensities)
        # print(nifti_processor._organ_intensities)
        organ_metrics = nifti_processor.calculate_metrics()

        self.assertIsNotNone(organ_metrics)

    def test_case_338(self):
        print("testcase 338")
        self.clabel_path = "./test-sessions/test-338/combined_labels_ANS/combined_labels.nii.gz"
        self.main_nifti_path = "./test-sessions/test-338/ct.nii.gz"

        nifti_processor = NiftiProcessor(self.main_nifti_path, self.clabel_path)
        nifti_processor.set_organ_intensities(self.organ_intensities)
        # print(nifti_processor._organ_intensities)
        organ_metrics = nifti_processor.calculate_metrics()

        self.assertIsNotNone(organ_metrics)
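calculate_metrics itself is implemented in services/nifti_processor.py, included earlier in this upload. For orientation, per-organ mean HU and volume of the kind these tests assert on can be derived from the CT and label volumes roughly as below; this is an illustrative sketch with hypothetical names, not the shipped implementation:

# Illustrative only: per-organ metrics from a CT volume and a combined
# label volume, using nibabel/numpy. Assumes the label volume is aligned
# voxel-for-voxel with the CT. Function and key names are hypothetical.
import nibabel as nib
import numpy as np

def organ_metrics(ct_path, labels_path, organ_intensities):
    ct_data = nib.load(ct_path).get_fdata()
    labels = nib.load(labels_path)
    label_data = labels.get_fdata()

    # Physical volume of one voxel in mm^3, from the header spacings.
    voxel_mm3 = float(np.prod(labels.header.get_zooms()[:3]))

    metrics = {}
    for organ, intensity in organ_intensities.items():
        mask = label_data == intensity
        count = int(np.count_nonzero(mask))
        metrics[organ] = {
            "meanHU": float(ct_data[mask].mean()) if count else 0.0,
            "volume": count * voxel_mm3 / 1000.0,  # mm^3 -> mL
        }
    return metrics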
tests/unit/test_scheduled_check.py
ADDED
@@ -0,0 +1,58 @@
import unittest
from models.base import db
from models.application_session import ApplicationSession
from models.combined_labels import CombinedLabels
from services.session_manager import SessionManager
from app import app
from datetime import datetime


class TestScheduledCheck(unittest.TestCase):

    def setUp(self):
        self.app = app
        self.app_context = app.app_context()
        self.app_context.push()

    def test_scheduled_check(self):
        session_manager = SessionManager.instance()

        clabel_id = session_manager.generate_uuid()
        # Back-dated timestamps so the session is already expired when
        # get_expired() runs.
        app_session = ApplicationSession(
            session_id=session_manager.generate_uuid(),
            main_nifti_path="path",
            combined_labels_id=clabel_id,
            session_created=datetime(2025, 1, 7),
            session_expire_date=datetime(2025, 1, 12)
        )

        clabel = CombinedLabels(
            combined_labels_id=clabel_id,
            combined_labels_path="path",
            organ_intensities={},
            organ_metadata={}
        )

        db.session.add(app_session)
        db.session.add(clabel)
        db.session.commit()

        expired = session_manager.get_expired()

        for app_session in expired:
            print(app_session.session_id)
            print(session_manager.terminate_session(app_session.session_id))

        # Smoke check only: reaching this line means the cleanup loop above
        # completed without raising.
        self.assertEqual(4, 4)


if __name__ == "__main__":
    unittest.main()
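get_expired and terminate_session come from services/session_manager.py, also part of this upload. The back-dated fixture above suggests the expiry lookup is simply a date comparison; a sketch under that assumption, not the shipped code:

# Hypothetical sketch of the expiry lookup inside SessionManager.
# Assumes the ApplicationSession model and Flask-SQLAlchemy `db` used in
# the test above; naive datetimes to match the fixture timestamps.
from datetime import datetime

def get_expired(self):
    # Every session whose expiry timestamp is already in the past.
    stmt = db.select(ApplicationSession).where(
        ApplicationSession.session_expire_date < datetime.now()
    )
    return db.session.execute(stmt).scalars().all()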
utils.py
ADDED
@@ -0,0 +1,14 @@
class Utils:
    @classmethod
    def removeFileExt(cls, filename):
        # Cut at the first '.' so multi-part extensions like 'ct.nii.gz'
        # reduce to 'ct'; filenames without a dot are returned unchanged
        # (str.index would raise ValueError on them).
        dot = filename.find('.')
        return filename if dot == -1 else filename[:dot]


# from services.session_manager import SessionManager

# s = SessionManager.instance()
# uuid = str(s.generate_session_key())
# print(uuid, type(uuid))