#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：domain-drop 
@File    ：data_helper_thyroid.py
@IDE     ：PyCharm 
@Author  ：cao xu
@Date    ：2025/9/5 下午3:17 
"""
import os, random
from pathlib import Path
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from .custom_list_dataset import ListTxtDataset, WithIndex

# ===== ImageNet normalization =====
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD  = (0.229, 0.224, 0.225)


def _build_transforms(args, train: bool):
    """Return the torchvision transform pipeline for training or evaluation.

    Hyper-parameter names mirror train_domain.py: min_scale/max_scale,
    random_horiz_flip, jitter, gray_flag. Image size comes from
    args.image_size (default 224). Both pipelines end with ToTensor and
    ImageNet normalization.
    """
    size = getattr(args, "image_size", 224)

    if not train:
        # Deterministic eval pipeline: resize shorter side, center crop.
        return transforms.Compose([
            transforms.Resize(size),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
        ])

    # Stochastic training augmentations.
    jitter = args.jitter
    return transforms.Compose([
        transforms.RandomResizedCrop(size, scale=(args.min_scale, args.max_scale)),
        transforms.RandomHorizontalFlip(p=float(args.random_horiz_flip)),
        transforms.ColorJitter(brightness=jitter, contrast=jitter,
                               saturation=jitter, hue=0.1),
        transforms.RandomGrayscale(p=float(args.gray_flag)),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
    ])


def _split_train_val(dataset, val_ratio, seed=0):
    """Deterministically shuffle and split a dataset into (train, val) Subsets.

    The validation share is capped at max(1, n // 10) whenever val_ratio > 0
    (guards against extreme ratios — original author's "防极端" note);
    val_ratio <= 0 yields an empty validation subset. An empty dataset
    yields two empty subsets.
    """
    total = len(dataset)
    if total == 0:
        return Subset(dataset, []), Subset(dataset, [])

    order = list(range(total))
    random.Random(seed).shuffle(order)

    if val_ratio > 0:
        # Requested size, but never fewer than 1 and never above ~10%.
        wanted = max(1, int(total * val_ratio)) if total > 1 else 1
        n_val = min(wanted, max(1, total // 10))
    else:
        n_val = 0

    return Subset(dataset, order[n_val:]), Subset(dataset, order[:n_val])


def _repo_root_from_this_file():
    """Resolve the repository root directory.

    This module lives in repo_root/data/, so two levels above this file
    is the repository root (the directory holding train_domain.py).
    """
    return Path(__file__).resolve().parents[1]


def _paths(args):
    """Resolve (images_root, splits_root) for the thyroid dataset.

    Conventions:
      - Images are read from <data_root>/images (args.data_root points at
        the .../picture directory).
      - Split lists come from an explicit, non-empty args.splits_root if
        given; otherwise they default to
        <repo_root>/thyroid_pacs_splits/pacs_label next to train_domain.py.
    """
    # images_root: /path/to/.../picture/images
    images_root = Path(args.data_root).resolve() / "images"

    # Optional explicit override takes precedence over the repo-root default.
    override = getattr(args, "splits_root", None)
    if override:
        splits_root = Path(override).resolve()
    else:
        splits_root = _repo_root_from_this_file() / "thyroid_pacs_splits" / "pacs_label"

    return images_root, splits_root


def _dom2id_from_sources(args):
    """Map each source-domain name to a contiguous integer id.

    Built from args.source (all domains minus the target, as prepared by
    train_domain.py) so the domain-discriminator head has exactly one
    class per source domain.
    """
    return dict(zip(args.source, range(len(args.source))))


def get_train_dataloader(args, patches=False):
    """Build the training and validation DataLoaders over the source domains.

    Reads thyroid_train.txt, restricts it to the source domains, splits it
    into train/val with _split_train_val, and wraps both in WithIndex loaders.

    Bug fix: the eval-time transform (`val_tf`) was previously built but
    never used — the validation subset was a view of `base_train` and thus
    received the stochastic training augmentations. We now build a second,
    non-augmented view of the same list file and re-point the validation
    indices at it, so validation is deterministic.

    The `patches` flag is kept for interface compatibility and is unused.

    Returns:
        (train_loader, val_loader): DataLoaders yielding WithIndex samples.
    """
    images_root, splits_root = _paths(args)
    train_list = splits_root / "thyroid_train.txt"

    dom2id = _dom2id_from_sources(args)
    allowed_domains = set(args.source)  # source domains only

    train_tf = _build_transforms(args, train=True)
    val_tf = _build_transforms(args, train=False)

    base_train = ListTxtDataset(
        list_file=str(train_list),
        images_root=str(images_root),
        transform=train_tf,
        allowed_domains=allowed_domains,
        dom2id=dom2id,
    )
    # Same list, same filtering — only the transform differs (no augmentation).
    base_val = ListTxtDataset(
        list_file=str(train_list),
        images_root=str(images_root),
        transform=val_tf,
        allowed_domains=allowed_domains,
        dom2id=dom2id,
    )

    train_set, val_set = _split_train_val(base_train, val_ratio=args.val_size, seed=args.time)
    # Keep the split indices but read validation samples through the
    # non-augmented dataset.
    val_set = Subset(base_val, val_set.indices)

    train_loader = DataLoader(WithIndex(train_set), batch_size=args.batch_size, shuffle=True,
                              num_workers=4, pin_memory=True, drop_last=True)
    val_loader   = DataLoader(WithIndex(val_set),   batch_size=args.batch_size, shuffle=False,
                              num_workers=4, pin_memory=True)
    return train_loader, val_loader


def get_val_dataloader(args, patches=False):
    """Build the test DataLoader for the held-out target hospital.

    Prefers the per-hospital list splits_root/domains/<target>.txt; if it
    does not exist, falls back to the global thyroid_test.txt filtered by
    the target hospital name. The `patches` flag is kept for interface
    compatibility and is unused.
    """
    images_root, splits_root = _paths(args)

    per_domain_file = splits_root / "domains" / f"{args.target}.txt"
    if per_domain_file.exists():
        # Per-hospital list is already restricted to the target: no filtering.
        test_list, allowed_domains = per_domain_file, None
    else:
        # Fall back to the global test list and filter by hospital name.
        test_list, allowed_domains = splits_root / "thyroid_test.txt", {args.target}

    # dom2id only needs the source domains; the target need not appear in it.
    test_set = ListTxtDataset(
        list_file=str(test_list),
        images_root=str(images_root),
        transform=_build_transforms(args, train=False),
        allowed_domains=allowed_domains,
        dom2id=_dom2id_from_sources(args),
    )
    return DataLoader(WithIndex(test_set), batch_size=args.batch_size, shuffle=False,
                      num_workers=4, pin_memory=True)
