#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：DomainDrop-main 
@File    ：json_build_thyroid_pacs_splits.py
@IDE     ：PyCharm 
@Author  ：cao xu
@Date    ：2025/9/5 下午2:20 

Build PACS-style split .txt files for DomainDrop from a JSON manifest,
without moving images on disk.

Input JSON is expected to contain a list under one or more keys (e.g., "train", "val", "test", "data"),
where each item looks like:
{
    "img_path": "/abs/or/relative/path/to/image.jpg",
    "cls": "benign"   # or any class string
    # optional: "label_path": "...", "box": [x,y,w,h], etc.
}

We will infer the hospital/domain name as the first folder right under your images_root.
Example images_root:
    /data/lining/data/Structured_Dataset/Thyroid_Data/Comprehensive_data/picture/images/

If your structure is:
    images_root/
        上海十院/
            case_001/
                img1.jpg
                ...
        上海市一/
            case_002/
                ...
then the hospital is "上海十院" or "上海市一".

Outputs (saved under out_dir):
- pacs_label/thyroid_train.txt       # lines: "<RELATIVE_TO_images_root> <label_id>"
- pacs_label/thyroid_test.txt
- pacs_label/domain_names.json       # index -> hospital mapping (train first then test)
- pacs_label/classes.json            # class_name -> label_id mapping
- optional per-domain files: pacs_label/domains/<HOSPITAL>.txt

You can later point DomainDrop to use these lists with a small dataloader modification
(read samples from these .txt line lists instead of scanning class sub-folders on disk).
"""
import os, sys, json, argparse, re
from collections import defaultdict, Counter
from pathlib import Path


def parse_args(argv=None):
    """Parse command-line options for the split builder.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            keeps the original behavior of reading ``sys.argv[1:]``;
            passing an explicit list makes the function testable without
            touching the process arguments.

    Returns:
        argparse.Namespace with the parsed options.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--json_path", type=str, help="Path to your JSON manifest.")
    ap.add_argument("--images_root", type=str, help="Root folder that contains hospital subfolders.")
    ap.add_argument("--out_dir", type=str, default="./thyroid_pacs_splits", help="Where to write output pacs_label files.")
    ap.add_argument("--train_domains", type=str, nargs="+", help="List of hospital names for training (exactly as folder names).")
    ap.add_argument("--test_domains", type=str, nargs="+", help="List of hospital names for testing (exactly as folder names).")
    ap.add_argument("--label_mapping", type=str, default="", help="Optional JSON for explicit class->id mapping, e.g. '{\"benign\":0,\"malignant\":1}'")
    ap.add_argument("--key_candidates", type=str, nargs="+", default=["train","val","test","data","items"], help="Top-level keys to look for in the JSON.")
    return ap.parse_args(argv)


def load_items(manifest_path, key_candidates):
    """Load and normalize entries from a JSON manifest.

    Args:
        manifest_path: Path to the JSON file (dict or list at top level).
        key_candidates: Top-level dict keys to collect list values from,
            in order (e.g. ["train", "val", "test"]).

    Returns:
        List of dict entries, each guaranteed to carry an "img_path" key
        (alternate keys "image"/"path" are normalized to "img_path").

    Raises:
        RuntimeError: if the file is not parseable JSON even after the
            trailing-comma fixup, or if the top level is neither dict
            nor list.
    """
    with open(manifest_path, "r", encoding="utf-8") as f:
        txt = f.read()
    # Try strict JSON first
    try:
        manifest = json.loads(txt)
    except json.JSONDecodeError:
        # Common hand-edited-JSON defect: a trailing comma before "}" or "]".
        # Remove only those. (The previous blanket dot-stripping regex deleted
        # every "." in the file, corrupting paths such as "img.jpg" -> "imgjpg",
        # and never actually removed trailing commas.)
        sanitized = re.sub(r",\s*([}\]])", r"\1", txt)
        try:
            manifest = json.loads(sanitized)
        except json.JSONDecodeError as e:
            raise RuntimeError(f"Failed to parse JSON: {e}") from e
    items = []
    if isinstance(manifest, dict):
        for k in key_candidates:
            v = manifest.get(k)
            if isinstance(v, list):
                items.extend(v)
        # If none of the expected keys matched, fall back to collecting
        # every list-valued entry under whatever keys the dict has.
        if not items:
            for v in manifest.values():
                if isinstance(v, list):
                    items.extend(v)
    elif isinstance(manifest, list):
        items = manifest
    else:
        raise RuntimeError("Manifest must be dict or list")
    # Keep only dict-like entries that carry an image path under any
    # recognized key, normalizing alternates onto "img_path".
    cleaned = []
    for it in items:
        if isinstance(it, dict) and ("img_path" in it or "image" in it or "path" in it):
            if "img_path" not in it:
                if "image" in it:
                    it["img_path"] = it["image"]
                elif "path" in it:
                    it["img_path"] = it["path"]
            cleaned.append(it)
    return cleaned


def get_hospital_from_path(img_path, images_root):
    """Infer the hospital/domain name for an image path.

    Primary rule: the hospital is the first path segment directly under
    ``images_root``. If the path does not live under the root, fall back
    to scanning the *image path itself* for a
    ".../picture/images/<HOSPITAL>/..." pattern, and finally for the
    segment right after any "images" component. (The original code split
    ``images_root`` in the fallbacks, which can never contain the
    hospital segment, so they always failed.)

    Args:
        img_path: Absolute or relative image path (any separator style).
        images_root: Root folder containing the hospital subfolders.

    Returns:
        The hospital name as a string, or None if it cannot be inferred.
    """
    try:
        rel = Path(img_path).as_posix()
        root = Path(images_root).as_posix().rstrip("/") + "/"
        if rel.startswith(root):
            return rel[len(root):].split("/")[0]
        # Fallback: find ".../picture/images/<HOSPITAL>/..." in the image path.
        segs = [s for s in re.split(r"[\\/]+", rel) if s]
        for i in range(len(segs) - 2):
            if segs[i].lower() == "picture" and segs[i + 1].lower() == "images":
                return segs[i + 2]
        # Last resort: the segment right after the first "images" component.
        for i, s in enumerate(segs[:-1]):
            if s.lower() == "images":
                return segs[i + 1]
    except Exception:
        # Deliberately best-effort: any malformed input yields None.
        pass
    return None


def main():
    """Build PACS-style split files for the thyroid dataset.

    Reads the JSON manifest, maps each image to its hospital (domain) and
    class id, and writes under <out_dir>/pacs_label/:
    train/test line lists ("<relative_path> <label_id>"), per-domain line
    lists, and the class/domain mapping JSONs.
    """
    args = parse_args()
    # NOTE(review): hard-coded overrides for this specific dataset — they
    # shadow whatever was supplied on the command line for these options.
    args.json_path = "TBSRTC_and_General_data.json"
    args.images_root = "/data/lining/data/Structured_Dataset/Thyroid_Data/Comprehensive_data/picture/images/"
    args.label_mapping = '{"benign":0,"malignant":1}'
    args.train_domains = ['上海十院','上海市一','华西门诊','四川省人民','困难样本','广州市一','徐州市中心','无锡市人民','武汉协和',
                          '沈阳医科大', '米诺娃','胜利油田','遂宁中心','颐和','成都中科','郑大附一','华西医院','遵义美年',
                          '华西-赵婉君','华西-马步云']
    args.test_domains = ['301桥本结节','公开','陕西肿瘤','无锡某院','绵阳某院','昆明某院','上海十院-180例回顾性数据',
                         '上海十院-少见癌','华西某院','杭州某院']
    images_root = Path(args.images_root).resolve()
    out_dir = Path(args.out_dir).resolve()
    # Create the full output tree up front: every open(..., "w") below
    # assumes these directories exist. (This line was previously commented
    # out, which made a fresh run crash with FileNotFoundError.)
    (out_dir / "pacs_label" / "domains").mkdir(parents=True, exist_ok=True)

    items = load_items(args.json_path, args.key_candidates)
    if not items:
        raise SystemExit("No items found in JSON. Check structure and key names.")

    # Build/validate class mapping: explicit JSON mapping wins; otherwise
    # derive ids from the sorted set of class strings seen in the manifest.
    if args.label_mapping:
        class2id = json.loads(args.label_mapping)
    else:
        classes = sorted({it.get("cls") for it in items if it.get("cls") is not None})
        class2id = {c: i for i, c in enumerate(classes)}
    domain_train = set(args.train_domains)
    domain_test = set(args.test_domains)
    # Domain index order: train domains first, then test domains.
    all_domains = list(args.train_domains) + list(args.test_domains)
    domain2idx = {d: i for i, d in enumerate(all_domains)}

    # Accumulators
    train_lines, test_lines = [], []
    per_domain = defaultdict(list)
    missing_domain, missing_class = 0, 0

    for it in items:
        p = it.get("img_path")
        c = it.get("cls")
        if not p or c not in class2id:
            # Entries missing path or class are skipped silently; entries
            # with a path but an unknown class string are counted.
            if not p or c is None:
                continue
            missing_class += 1
            continue
        # Make path relative to images_root if possible; otherwise keep as absolute
        p_posix = Path(p).as_posix()
        if p_posix.startswith(images_root.as_posix().rstrip("/") + "/"):
            rel = p_posix[len(images_root.as_posix().rstrip("/")) + 1:]
        else:
            rel = p_posix  # absolute fallback (we'll still write it; loader tweak may be needed)
        hospital = get_hospital_from_path(p_posix, images_root.as_posix())
        if not hospital:
            missing_domain += 1
            continue
        line = f"{rel} {class2id[c]}"
        if hospital in domain_train:
            train_lines.append(line)
            per_domain[hospital].append(line)
        elif hospital in domain_test:
            test_lines.append(line)
            per_domain[hospital].append(line)
        else:
            # skip if hospital not listed in either split
            continue

    # Write outputs
    with open(out_dir / "pacs_label" / "thyroid_train.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(train_lines))
    with open(out_dir / "pacs_label" / "thyroid_test.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(test_lines))
    for d, lines in per_domain.items():
        with open(out_dir / "pacs_label" / "domains" / f"{d}.txt", "w", encoding="utf-8") as f:
            f.write("\n".join(lines))
    with open(out_dir / "pacs_label" / "domain_names.json", "w", encoding="utf-8") as f:
        # json converts int keys to strings; consumers should index by str(i).
        json.dump({i: d for d, i in domain2idx.items()}, f, ensure_ascii=False, indent=2)
    with open(out_dir / "pacs_label" / "classes.json", "w", encoding="utf-8") as f:
        json.dump(class2id, f, ensure_ascii=False, indent=2)

    # Stats
    print("=== Done ===")
    print(f"Wrote: {out_dir/'pacs_label'/'thyroid_train.txt'}  ({len(train_lines)} lines)")
    print(f"Wrote: {out_dir/'pacs_label'/'thyroid_test.txt'}   ({len(test_lines)} lines)")
    print(f"Per-domain files under: {out_dir/'pacs_label'/'domains'}  ({len(per_domain)} domains)")
    print(f"Class mapping saved to: {out_dir/'pacs_label'/'classes.json'}")
    print(f"Domain names saved to:  {out_dir/'pacs_label'/'domain_names.json'}")
    if missing_domain:
        print(f"WARNING: {missing_domain} entries skipped due to missing/unknown hospital from path.")
    if missing_class:
        print(f"WARNING: {missing_class} entries skipped due to unknown class string.")
    # Print a few sample lines
    for tag, coll in [("TRAIN", train_lines), ("TEST", test_lines)]:
        print(f"\nSample {tag} lines:")
        for s in coll[:5]:
            print("\n", s)


# Script entry point: build the split files when executed directly.
if __name__ == "__main__":
    main()
