#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：DomainDrop-main 
@File    ：build_thyroid_pacs_splits.py
@IDE     ：PyCharm 
@Author  ：cao xu
@Date    ：2025/9/5 下午2:56 

Build PACS-style split .txt files for DomainDrop from a folder tree:
  images_root / <Hospital> / <Case> / <image>.(jpg|png|tif|...)
  labels_root / <Hospital> / <Case> / <image>.txt   (YOLO-style: cls cx cy w h)

Output (under out_dir):
  - pacs_label/thyroid_train.txt   # "<REL_PATH_TO_images_root> <label_id>"
  - pacs_label/thyroid_test.txt
  - pacs_label/domains/<Hospital>.txt
  - pacs_label/classes.json        # {"benign":0, "malignant":1}
  - pacs_label/domain_names.json   # {0:"医院A", 1:"医院B", ...}  (train domains first, then test)

Label rule:
  If any line in the txt has first token == 1 -> malignant (1), else benign (0).

Usage:
  python build_thyroid_pacs_splits.py \
    --images_root "/path/to/picture/images/" \
    --labels_root "/path/to/picture/cls_labels/" \
    --out_dir     "/path/to/thyroid_pacs_splits" \
    --train_domains "上海十院" ... \
    --test_domains  "上海十院-180例回顾性数据" ...

Author: you :)
"""

import argparse
import json
import os
import sys
from collections import defaultdict, Counter
from pathlib import Path
from typing import Optional

# File extensions treated as images when scanning the tree (matched
# case-insensitively against the lower-cased suffix).
IMG_EXTS = {".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff", ".gif"}

# Default dataset locations; overridable via --images_root / --labels_root / --out_dir.
DEFAULT_IMAGES_ROOT = "/data/lining/data/Structured_Dataset/Thyroid_Data/Comprehensive_data/picture/images/"
DEFAULT_LABELS_ROOT = "/data/lining/data/Structured_Dataset/Thyroid_Data/Comprehensive_data/picture/cls_labels/"
DEFAULT_OUT_DIR     = "./thyroid_pacs_splits"

# Hospital folder names used as source (training) domains by default.
DEFAULT_TRAIN_DOMAINS = [ "上海十院","上海市一","华西门诊","四川省人民","困难样本","广州市一","徐州市中心","无锡市人民","武汉协和",
                          "沈阳医科大","米诺娃","胜利油田","遂宁中心","颐和","成都中科","郑大附一","华西医院","遵义美年",
                          "华西-赵婉君","华西-马步云"]
# Hospital folder names held out as target (test) domains by default.
DEFAULT_TEST_DOMAINS = ["上海十院-180例回顾性数据","上海十院-少见癌","华西某院","301桥本结节","公开", "陕西肿瘤","无锡某院",
                        "绵阳某院","昆明某院","杭州某院"]


def parse_args():
    """Parse the command-line options controlling split generation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--images_root", type=str, default=DEFAULT_IMAGES_ROOT,
                        help="Root folder: images/<Hospital>/<Case>/<image>")
    parser.add_argument("--labels_root", type=str, default=DEFAULT_LABELS_ROOT,
                        help="Root folder: cls_labels/<Hospital>/<Case>/<image>.txt")
    parser.add_argument("--out_dir", type=str, default=DEFAULT_OUT_DIR,
                        help="Where to write pacs_label files")
    parser.add_argument("--train_domains", type=str, nargs="+", default=DEFAULT_TRAIN_DOMAINS,
                        help="Hospitals used for training")
    parser.add_argument("--test_domains", type=str, nargs="+", default=DEFAULT_TEST_DOMAINS,
                        help="Hospitals used for testing")
    return parser.parse_args()


def read_cls_from_txt(txt_path: Path) -> Optional[int]:
    """Derive a binary class id from a YOLO-style label file.

    Each non-empty line is expected to look like "cls cx cy w h"; only the
    first token (the class) is inspected. If any line's class token is "1"
    the sample is malignant (1), otherwise benign (0). Tokens other than
    "0"/"1" are silently ignored.

    Args:
        txt_path: Path to the label ``.txt`` file.

    Returns:
        1 if any line's first token is "1", else 0; ``None`` when the file
        is missing or unreadable (the caller skips such samples).
    """
    if not txt_path.exists():
        return None  # caller handles missing
    try:
        with open(txt_path, "r", encoding="utf-8") as f:
            for line in f:
                tokens = line.split()  # robust to blank lines / extra spaces
                if tokens and tokens[0] == "1":
                    # One malignant box is enough; no need to read further.
                    return 1
        return 0
    except (OSError, UnicodeError):
        # Only I/O and decoding failures map to "invalid label"; anything
        # else (a genuine bug) should propagate instead of being swallowed.
        return None


def build_lists(images_root: Path, labels_root: Path,
                train_domains: set, test_domains: set):
    """Walk ``images_root`` and pair each image with its label file.

    Expected layout (cases may be nested arbitrarily deep):
        images_root/<Hospital>/.../<image>.<ext>
        labels_root/<Hospital>/.../<image>.txt

    Args:
        images_root: Root of the image tree; first-level dirs are hospitals.
        labels_root: Root of the label tree mirroring ``images_root``.
        train_domains: Hospital names assigned to the training split.
        test_domains: Hospital names assigned to the test split.

    Returns:
        ``(train_lines, test_lines, per_domain, stats)`` where each line is
        ``"<rel_path> <cls_id>"``, ``per_domain`` maps hospital -> lines,
        and ``stats`` summarizes kept/skipped counts.
    """
    train_lines, test_lines = [], []
    per_domain = defaultdict(list)

    missing_label = 0
    unknown_domain = 0

    # First level of the tree is the hospital (= domain).
    for hosp_dir in sorted(p for p in images_root.iterdir() if p.is_dir()):
        hosp = hosp_dir.name
        for img_path in hosp_dir.rglob("*"):
            if not img_path.is_file():
                continue
            if img_path.suffix.lower() not in IMG_EXTS:
                continue  # only image files

            # Compute the root-relative path once; it is reused both for the
            # output line and for locating the matching label txt.
            rel = img_path.relative_to(images_root)
            label_path = labels_root / rel.with_suffix(".txt")

            cls_id = read_cls_from_txt(label_path)
            if cls_id is None:
                missing_label += 1
                continue  # skip samples without a valid label

            line = f"{rel.as_posix()} {cls_id}"

            if hosp in train_domains:
                train_lines.append(line)
                per_domain[hosp].append(line)
            elif hosp in test_domains:
                test_lines.append(line)
                per_domain[hosp].append(line)
            else:
                unknown_domain += 1  # hospital in neither list -> skip

    stats = {
        "missing_label": missing_label,
        "unknown_domain": unknown_domain,
        "n_train": len(train_lines),
        "n_test": len(test_lines),
        "per_domain_counts": {k: len(v) for k, v in per_domain.items()},
    }
    return train_lines, test_lines, per_domain, stats

def main():
    """Entry point: build and write the PACS-style split files."""
    args = parse_args()
    images_root = Path(args.images_root).resolve()
    labels_root = Path(args.labels_root).resolve()
    out_dir = Path(args.out_dir).resolve()

    # Both roots must exist before we do any work.
    for root, tag in ((images_root, "images_root"), (labels_root, "labels_root")):
        if not root.exists():
            print(f"[ERROR] {tag} not found: {root}")
            sys.exit(1)

    # Prepare output directories.
    pacs_dir = out_dir / "pacs_label"
    dom_dir = pacs_dir / "domains"
    dom_dir.mkdir(parents=True, exist_ok=True)

    # Deduplicate while preserving CLI order; train domains take the low indices.
    train_domains = list(dict.fromkeys(args.train_domains))
    test_domains = list(dict.fromkeys(args.test_domains))
    all_domains = train_domains + test_domains
    idx2domain = dict(enumerate(all_domains))

    # Scan the tree and collect the split lines.
    train_lines, test_lines, per_domain, stats = build_lists(
        images_root, labels_root, set(train_domains), set(test_domains))

    # Write the train/test split files.
    (pacs_dir / "thyroid_train.txt").write_text("\n".join(train_lines), encoding="utf-8")
    (pacs_dir / "thyroid_test.txt").write_text("\n".join(test_lines), encoding="utf-8")

    # One detail file per domain.
    for domain, lines in per_domain.items():
        (dom_dir / f"{domain}.txt").write_text("\n".join(lines), encoding="utf-8")

    # Fixed binary class mapping.
    classes = {"benign": 0, "malignant": 1}
    (pacs_dir / "classes.json").write_text(
        json.dumps(classes, ensure_ascii=False, indent=2), encoding="utf-8")

    # Domain index -> hospital name mapping.
    (pacs_dir / "domain_names.json").write_text(
        json.dumps(idx2domain, ensure_ascii=False, indent=2), encoding="utf-8")

    # Summary report.
    print("=== Done ===")
    print(f"Wrote: {pacs_dir/'thyroid_train.txt'} ({len(train_lines)} lines)")
    print(f"Wrote: {pacs_dir/'thyroid_test.txt'}  ({len(test_lines)} lines)")
    print(f"Per-domain files under: {dom_dir} ({len(per_domain)} domains)")
    print(f"Classes saved to: {pacs_dir/'classes.json'}")
    print(f"Domain names saved to: {pacs_dir/'domain_names.json'}")
    print("--- Stats ---")
    print(f"Missing/invalid labels: {stats['missing_label']}")
    print(f"Images in unknown domains: {stats['unknown_domain']}")
    print("Per-domain counts (kept):")
    for domain in all_domains:
        print(f"  {domain}: {stats['per_domain_counts'].get(domain, 0)}")


if __name__ == "__main__":
    main()
