#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：domain-drop 
@File    ：train_baseline_thyroid_resnet50.py
@IDE     ：PyCharm 
@Author  ：cao xu
@Date    ：2025/9/5 下午3:53 

Baseline: ImageNet-pretrained ResNet50 for thyroid benign/malignant classification (ERM).
- Train once on 20 training hospitals
- Evaluate sequentially on the 10 test hospitals (no retraining)

Assumptions:
- Folder layout:
    DATA_ROOT/
      images/                        # images_root: <Hospital>/<Case>/.../img.jpg
    REPO_ROOT/
      thyroid_pacs_splits/
        pacs_label/
          thyroid_train.txt
          thyroid_test.txt
          domains/<Hospital>.txt     # optional but recommended

- List file format: "<REL_PATH_TO_images_root> <label_id>"
- Label_id: 0=benign, 1=malignant

Author: baseline script for comparison with DomainDrop variants.
"""

import os
import random
from pathlib import Path
from typing import List, Tuple, Optional, Dict

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision import transforms
from torchvision.models import resnet50, ResNet50_Weights
from PIL import Image, ImageFile, UnidentifiedImageError
ImageFile.LOAD_TRUNCATED_IMAGES = True    # allow loading truncated/partially-written images
Image.MAX_IMAGE_PIXELS = None             # disable PIL's decompression-bomb pixel limit

# ========== [ CONFIG ] ==========
# >>>>>>>> Modify these paths for your environment <<<<<<<<
DATA_ROOT   = "/data/lining/data/Structured_Dataset/Thyroid_Data/Comprehensive_data/picture"  # has images/
REPO_ROOT   = Path(__file__).resolve().parent.as_posix()  # this .py sits next to train_domain.py
SPLITS_ROOT = f"{REPO_ROOT}/thyroid_pacs_splits/pacs_label"  # where the .txt lists are stored

# Fixed hospital splits (20 train + 10 test)
# NOTE: the hospital names double as directory names under images/ and as
# per-domain list-file names under domains/ — do not translate or rename them.
TRAIN_DOMAINS = [
    "上海十院","上海市一","华西门诊","四川省人民","困难样本","广州市一","徐州市中心","无锡市人民","武汉协和","沈阳医科大",
    "米诺娃","胜利油田","遂宁中心","颐和","成都中科","郑大附一","华西医院","遵义美年","华西-赵婉君","华西-马步云"
]
TEST_DOMAINS = [
    "上海十院-180例回顾性数据","上海十院-少见癌","华西某院","301桥本结节","公开","陕西肿瘤","无锡某院","绵阳某院","昆明某院","杭州某院"
]

# Training hyper-params
SEED         = 2025
os.environ['CUDA_VISIBLE_DEVICES'] = "0"  # pin to GPU 0; must run before CUDA is initialized
DEVICE       = "cuda" if torch.cuda.is_available() else "cpu"
BATCH_SIZE   = 64
EPOCHS       = 50
LR           = 2e-3   # initial SGD learning rate (cosine-annealed to ~0 over EPOCHS)
WEIGHT_DECAY = 1e-4
NUM_WORKERS  = 4
IMAGE_SIZE   = 224    # input resolution expected by ImageNet-pretrained ResNet50
VAL_RATIO    = 0.1  # 10% of training set used as validation
RESULT_DIR   = "./baseline_results"  # will save best.ckpt, logs, and per-target metrics

# ========== [ Utils / Seed ] ==========
def set_seed(seed: int):
    """Seed Python's and PyTorch's RNGs for reproducible runs.

    cuDNN is deliberately left in benchmark (non-deterministic) mode:
    convolution autotuning gives faster training at the cost of bit-exact
    repeatability.
    """
    for seeder in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True  # autotune conv algorithms for speed


# ========== [ Dataset ] ==========
def infer_hospital_from_path(img_path: str, images_root: str) -> Optional[str]:
    """Extract the hospital (domain) name from an image path.

    Expected layout is ``images_root/<Hospital>/<Case>/.../img.jpg``, in
    which case the first component of the path relative to *images_root*
    is the hospital. If the path is not under *images_root*, fall back to
    scanning for an ``.../images/<Hospital>/...`` segment anywhere in it.

    Returns:
        The hospital name, or None when it cannot be inferred.
    """
    path = Path(img_path)
    try:
        # Fast path: first component below the images root.
        # relative_to raises ValueError (not arbitrary exceptions) on mismatch,
        # so catch only that instead of the original broad `except Exception`.
        rel_parts = path.relative_to(Path(images_root)).parts
        if rel_parts:
            return rel_parts[0]
        return None  # path == images_root itself: no hospital component
    except ValueError:
        # Fallback: look for an "images" directory and take the next segment.
        # Excluding the last element guarantees parts[i + 1] exists.
        parts = path.parts
        for i, segment in enumerate(parts[:-1]):
            if segment.lower() == "images":
                return parts[i + 1]
    return None


class ListTxtDataset(Dataset):
    """Dataset backed by a list file of ``<relative_path> <label_id>`` lines.

    Each line names an image relative to *images_root* (absolute paths also
    work) plus an integer class label (0=benign, 1=malignant). Samples are
    optionally filtered to *allowed_domains* (hospital names inferred from
    the path), and each hospital maps to a domain id via *dom2id* (-1 when
    the hospital is not in the mapping).

    Items are ``(image, class_id, domain_id)`` tuples.
    """
    def __init__(self,
                 list_file: str,
                 images_root: str,
                 transform=None,
                 allowed_domains: Optional[List[str]] = None,
                 dom2id: Optional[Dict[str, int]] = None):
        self.root = images_root
        self.transform = transform
        # Set for O(1) membership tests; None means "keep everything".
        self.allowed_domains = set(allowed_domains) if allowed_domains else None
        self.dom2id = dict(dom2id) if dom2id else {}
        self.samples: List[Tuple[str, int, int]] = []  # (img_path, class_id, domain_id)

        with open(list_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                # rsplit tolerates spaces inside the path itself: only the
                # trailing token is the label.
                path_text, label_text = line.rsplit(" ", 1)
                label = int(label_text)

                # Lists normally store paths relative to images_root, but
                # absolute paths pass through unchanged.
                img_path = path_text if os.path.isabs(path_text) else os.path.join(self.root, path_text)

                hosp = infer_hospital_from_path(img_path, self.root)
                if self.allowed_domains and hosp not in self.allowed_domains:
                    continue
                dom = self.dom2id.get(hosp, -1)
                self.samples.append((img_path, label, dom))

    def __len__(self):
        return len(self.samples)

    def _safe_open_rgb(self, path, image_size=224, max_retries=3):
        """Open *path* as an RGB PIL image, retrying on decode errors.

        If the file is still unreadable after *max_retries* attempts, the
        failure is appended to bad_images.log and a black placeholder of
        ``image_size`` square is returned so one corrupt file cannot abort
        an entire training run.
        """
        last_err = None
        for _ in range(max_retries):
            try:
                with Image.open(path) as im:
                    return im.convert("RGB")
            except (OSError, UnidentifiedImageError) as e:
                last_err = e
        # Record the bad sample next to this script for later inspection.
        badlog = os.path.join(os.path.dirname(__file__), "bad_images.log")
        try:
            with open(badlog, "a", encoding="utf-8") as f:
                f.write(f"{path}\t{repr(last_err)}\n")
        except OSError:
            pass  # best-effort logging must never crash training
        # Fix: honor the image_size parameter directly instead of the
        # original fragile `'IMAGE_SIZE' in globals()` lookup, which
        # silently ignored the caller's image_size.
        return Image.new("RGB", (image_size, image_size))

    def __getitem__(self, idx):
        img_path, label, dom = self.samples[idx]
        img = self._safe_open_rgb(img_path, image_size=IMAGE_SIZE)
        if self.transform is not None:
            img = self.transform(img)
        return img, label, dom


# ========== [ Transforms ] ==========
# ImageNet channel statistics matching the pretrained ResNet50 backbone.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD  = (0.229, 0.224, 0.225)

def build_transforms(train: bool):
    """Return the image preprocessing pipeline.

    Training applies random crop/flip/color augmentation; evaluation uses a
    deterministic resize + center crop. Both end with ImageNet normalization.
    """
    if not train:
        return transforms.Compose([
            transforms.Resize(IMAGE_SIZE),
            transforms.CenterCrop(IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
        ])
    return transforms.Compose([
        transforms.RandomResizedCrop(IMAGE_SIZE, scale=(0.6, 1.0)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(0.2, 0.2, 0.2, 0.1),
        transforms.RandomGrayscale(p=0.1),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
    ])


# ========== [ Data loaders ] ==========
def split_train_val(ds: Dataset, val_ratio=VAL_RATIO, seed=SEED):
    """Deterministically split *ds* into (train, val) Subsets.

    A seeded shuffle of the index list makes the split reproducible across
    runs; at least one sample always lands in the validation subset.
    """
    indices = list(range(len(ds)))
    random.Random(seed).shuffle(indices)
    n_val = max(1, int(len(ds) * val_ratio)) if len(ds) > 1 else 1
    return Subset(ds, indices[n_val:]), Subset(ds, indices[:n_val])


def get_train_val_loaders() -> Tuple[DataLoader, DataLoader]:
    """Build the train/val DataLoaders restricted to the 20 training hospitals."""
    images_root = f"{DATA_ROOT}/images"
    train_list  = f"{SPLITS_ROOT}/thyroid_train.txt"

    # Hospital -> domain-id map over the training domains only. Plain ERM
    # never reads the domain id, but keeping it lets the same dataset class
    # serve the DomainDrop variants unchanged.
    dom2id = {name: i for i, name in enumerate(TRAIN_DOMAINS)}

    full_ds = ListTxtDataset(
        list_file=train_list,
        images_root=images_root,
        transform=build_transforms(train=True),
        allowed_domains=TRAIN_DOMAINS,  # drop anything outside the 20 hospitals
        dom2id=dom2id,
    )
    tr_set, va_set = split_train_val(full_ds, val_ratio=VAL_RATIO, seed=SEED)

    return (
        DataLoader(tr_set, batch_size=BATCH_SIZE, shuffle=True,
                   num_workers=NUM_WORKERS, pin_memory=True, drop_last=True),
        DataLoader(va_set, batch_size=BATCH_SIZE, shuffle=False,
                   num_workers=NUM_WORKERS, pin_memory=True),
    )


def get_test_loader_for_hospital(hospital_name: str) -> DataLoader:
    """Build an evaluation loader for one held-out hospital.

    Prefers the per-hospital list under domains/<Hospital>.txt; if absent,
    falls back to filtering the combined test list by hospital name.
    """
    images_root = f"{DATA_ROOT}/images"
    per_domain_file = f"{SPLITS_ROOT}/domains/{hospital_name}.txt"
    test_tf = build_transforms(train=False)

    if os.path.exists(per_domain_file):
        # Per-hospital lists are already filtered; filtering again could
        # yield zero samples, so keep allowed_domains=None.
        list_file, keep = per_domain_file, None
    else:
        # Fallback: filter the combined test list by hospital name.
        list_file, keep = f"{SPLITS_ROOT}/thyroid_test.txt", [hospital_name]

    ds = ListTxtDataset(list_file, images_root, transform=test_tf,
                        allowed_domains=keep, dom2id={hospital_name: 0})
    return DataLoader(ds, batch_size=BATCH_SIZE, shuffle=False,
                      num_workers=NUM_WORKERS, pin_memory=True)


# ========== [ Model / Train / Eval ] ==========
class AvgMeter:
    """Weighted running average of a scalar (e.g. per-batch loss or accuracy)."""

    def __init__(self):
        self.s = 0.0  # weighted sum of observed values
        self.n = 0    # total weight (usually a sample count)

    def add(self, v, k=1):
        """Accumulate value *v* with weight *k* (typically the batch size)."""
        self.s = self.s + float(v) * k
        self.n = self.n + k

    @property
    def avg(self):
        # max(1, n) guards against division by zero before any add().
        return self.s / max(1, self.n)


def accuracy(logits: torch.Tensor, targets: torch.Tensor) -> float:
    """Top-1 accuracy for one batch.

    logits: [B, C] raw scores; targets: [B] integer class ids.
    Returns the fraction of correct predictions in [0, 1].
    """
    hits = (logits.argmax(dim=1) == targets).sum().item()
    return hits / max(1, targets.size(0))


def build_model() -> nn.Module:
    """ImageNet-pretrained ResNet50 with a fresh 2-way (benign/malignant) head."""
    model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
    model.fc = nn.Linear(model.fc.in_features, 2)  # swap out the 1000-way head
    return model


def train_one_epoch(model, loader, criterion, optimizer, device=DEVICE):
    """Run one optimization pass over *loader*.

    Returns (mean_loss, mean_accuracy), each weighted by batch size.
    The loader is expected to yield (images, labels, domain_id) triples;
    the domain id is ignored by plain ERM.
    """
    model.train()
    losses, accs = AvgMeter(), AvgMeter()
    for imgs, labels, _ in loader:
        imgs = imgs.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)

        optimizer.zero_grad()
        logits = model(imgs)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        batch = labels.size(0)
        losses.add(loss.item(), k=batch)
        accs.add(accuracy(logits.detach(), labels), k=batch)
    return losses.avg, accs.avg


@torch.no_grad()
def evaluate(model, loader, criterion, device=DEVICE):
    """Compute mean loss/accuracy over *loader* without gradient tracking.

    Returns (mean_loss, mean_accuracy), each weighted by batch size.
    """
    model.eval()
    losses, accs = AvgMeter(), AvgMeter()
    for imgs, labels, _ in loader:
        imgs = imgs.to(device, non_blocking=True)
        labels = labels.to(device, non_blocking=True)
        logits = model(imgs)
        batch = labels.size(0)
        losses.add(criterion(logits, labels).item(), k=batch)
        accs.add(accuracy(logits, labels), k=batch)
    return losses.avg, accs.avg


def save_ckpt(model, path: str):
    """Serialize *model*'s state_dict to *path*, creating parent dirs as needed.

    Fix: ``os.path.dirname`` returns "" for a bare filename, and
    ``os.makedirs("")`` raises FileNotFoundError — so only create the
    parent directory when the path actually has one.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    torch.save(model.state_dict(), path)


def main():
    """Train the ERM baseline once, then evaluate each held-out hospital.

    Side effects (all under RESULT_DIR):
      - best.ckpt                          best-on-validation model weights
      - train_log.txt                      per-epoch CSV metrics
      - baseline_multi_eval.txt / .csv     per-hospital test accuracy
    """
    set_seed(SEED)
    os.makedirs(RESULT_DIR, exist_ok=True)

    # Data
    train_loader, val_loader = get_train_val_loaders()

    # Model
    device = torch.device(DEVICE)
    model = build_model().to(device)

    # Loss / Optim
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=WEIGHT_DECAY)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=EPOCHS)

    # Train
    best_val_acc = 0.0
    best_path = f"{RESULT_DIR}/best.ckpt"
    log_file = f"{RESULT_DIR}/train_log.txt"
    # Truncate the log and write the CSV header once; epochs append below.
    with open(log_file, "w", encoding="utf-8") as flog:
        flog.write("epoch,train_loss,train_acc,val_loss,val_acc,lr\n")

    for ep in range(1, EPOCHS + 1):
        tr_loss, tr_acc = train_one_epoch(model, train_loader, criterion, optimizer, device)
        val_loss, val_acc = evaluate(model, val_loader, criterion, device)
        scheduler.step()  # cosine decay: one scheduler step per epoch

        lr_now = optimizer.param_groups[0]["lr"]
        msg = f"Epoch {ep:03d} | train_loss={tr_loss:.4f} acc={tr_acc:.4f} | val_loss={val_loss:.4f} acc={val_acc:.4f} | lr={lr_now:.6f}"
        print(msg)
        # Re-open in append mode each epoch so a crash loses at most one row.
        with open(log_file, "a", encoding="utf-8") as flog:
            flog.write(f"{ep},{tr_loss:.6f},{tr_acc:.6f},{val_loss:.6f},{val_acc:.6f},{lr_now:.8f}\n")

        # Keep only the checkpoint with the best validation accuracy.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            save_ckpt(model, best_path)

    # Load best
    # NOTE(review): if val_acc never exceeds 0.0, no checkpoint is written
    # and the final-epoch weights are evaluated instead.
    if os.path.exists(best_path):
        state = torch.load(best_path, map_location="cpu")
        model.load_state_dict(state)
        print(f"[Info] Loaded best checkpoint: {best_path} (val_acc={best_val_acc:.4f})")

    # Evaluate on 10 test hospitals sequentially
    per_target_txt = f"{RESULT_DIR}/baseline_multi_eval.txt"
    per_target_csv = f"{RESULT_DIR}/baseline_multi_eval.csv"
    with open(per_target_txt, "w", encoding="utf-8") as ftxt, \
         open(per_target_csv, "w", encoding="utf-8") as fcsv:
        fcsv.write("target,acc\n")
        for hosp in TEST_DOMAINS:
            test_loader = get_test_loader_for_hospital(hosp)
            test_loss, test_acc = evaluate(model, test_loader, criterion, device)
            line = f"[MultiEval] target={hosp} acc={test_acc:.4f} loss={test_loss:.4f}"
            print(line)
            ftxt.write(line + "\n")
            fcsv.write(f"{hosp},{test_acc:.4f}\n")

    print("[Done] Baseline training completed and multi-target evaluation saved to:", RESULT_DIR)


if __name__ == "__main__":
    # Entry point: run the full train + multi-hospital evaluation pipeline.
    main()
