#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：div-align-dg 
@File    ：thyroid_voc.py
@IDE     ：PyCharm 
@Author  ：cao xu
@Date    ：2025/8/29 上午11:30 
"""
import os
import random
import sys
import xml.etree.ElementTree as ET
from typing import List, Tuple

import torch
import torch.utils.data as data
from PIL import Image
from maskrcnn_benchmark.structures.bounding_box import BoxList
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Prefer importing ACVC from this package; fall back to a top-level import.
try:
    from .ACVCGenerator import ACVCGenerator
except Exception:
    from ACVCGenerator import ACVCGenerator


class ThyroidVOCDataset(data.Dataset):
    """
    Thyroid VOC-style detection dataset with ACVC multi-view support.

    Expected layout:
    - img_root:   .../picture/images
    - label_root: .../picture/voc_labels
    - split:      path to a list file; each line is a relative path from img_root
                  (can include subfolders), e.g. '301桥本结节/JFJ301-000...IMG02_HD.jpg'

    During training with ``num_views > 1``, ``__getitem__`` returns a stacked
    (A, C, H, W) tensor of A views (original + A-1 ACVC-corrupted copies) plus a
    list of per-view targets; otherwise a single (image, target, index) triple.
    """

    CLASSES = ("__background__", "nodule")  # single foreground class: nodule

    def __init__(
        self,
        img_root: str,
        label_root: str,
        split: str,
        use_difficult: bool = False,
        transforms=None,
        num_views: int = 2,              # A: number of views per image (>= 1)
        corruption_list: List[str] = None,
    ):
        """
        Args:
            img_root: root directory holding the images.
            label_root: root directory holding the VOC-style XML annotations.
            split: path to the split list file (one relative image path per line).
            use_difficult: keep objects flagged ``difficult`` in training targets.
            transforms: optional callable ``(image, target) -> (image, target)``.
            num_views: number of views A per image; values below 1 are clamped.
            corruption_list: pool of ACVC corruption names; when None, the
                multi_weather-compatible default pool below is used.
        """
        self.img_root = img_root
        self.label_root = label_root
        self.split = split
        self.keep_difficult = use_difficult
        self.transforms = transforms

        # Training split is detected from the split file name (same convention
        # as multi_weather): any basename containing 'train'.
        self.is_train = "train" in os.path.basename(split).lower()

        # ACVC: as in multi_weather, default A=2 (original image + 1 corruption).
        self.num_views = max(1, int(num_views))
        self.acvc = ACVCGenerator()

        # Corruption pool kept in sync with multi_weather (extend/trim as needed).
        self.corruption_func = corruption_list or [
            "defocus_blur",
            "glass_blur",
            "gaussian_blur",
            "motion_blur",
            "speckle_noise",
            "shot_noise",
            "impulse_noise",
            "gaussian_noise",
            "jpeg_compression",
            "pixelate",
            "elastic_transform",
            "brightness",
            "saturate",
            "contrast",
            "high_pass_filter",
            # "phase_scaling",
        ]

        # Read the split list, skipping blank lines.
        with open(self.split, "r", encoding="utf-8") as f:
            self.ids = [x.strip() for x in f if x.strip()]
        self.id_to_img_map = dict(enumerate(self.ids))

        # Class name <-> contiguous index mappings.
        self.class_to_ind = {c: i for i, c in enumerate(self.CLASSES)}
        self.categories = dict(enumerate(self.CLASSES))

    # --------------------------------------------------------------------- #
    # PyTorch Dataset API
    # --------------------------------------------------------------------- #
    def __len__(self) -> int:
        return len(self.ids)

    def __getitem__(self, index: int):
        """
        Load one sample: image(s), target(s) and the sample index.

        Returns:
            Training with ``num_views > 1``: ``(Tensor(A, C, H, W), List[BoxList], index)``.
            Otherwise: ``(Tensor(C, H, W) or PIL.Image, BoxList, index)``.
        """
        rel = self.ids[index]  # path relative to img_root (may contain subdirs)
        img_path = os.path.join(self.img_root, rel)
        xml_path = os.path.join(self.label_root, os.path.splitext(rel)[0] + ".xml")

        image = Image.open(img_path).convert("RGB")
        width, height = image.size

        # Parse the annotation and clean it up; negative samples (no boxes)
        # are deliberately preserved.
        target_orig = self._load_annotation(xml_path)
        target_orig = self._sanitize_boxlist(target_orig, width, height)

        # Build the A views (original + corruptions during training).
        views, targets = self._make_views(image, target_orig)

        # Apply transforms per view; re-sanitize because transforms can
        # re-introduce out-of-bounds or degenerate boxes.
        proc_imgs: List[torch.Tensor] = []
        proc_tgts: List[BoxList] = []
        for img_i, tgt_i in zip(views, targets):
            if self.transforms is not None:
                img_i, tgt_i = self.transforms(img_i, tgt_i)
            if hasattr(img_i, "shape"):
                # Tensor(C, H, W) after torchvision-style transforms.
                _, Ht, Wt = img_i.shape
            else:
                # Fallback: some custom transforms may still yield PIL images.
                Wt, Ht = img_i.size
            tgt_i = self._sanitize_boxlist(tgt_i, Wt, Ht)
            proc_imgs.append(img_i)
            proc_tgts.append(tgt_i)  # may be empty (negative sample)

        # Training: (A, C, H, W) + list of BoxLists; evaluation: single view.
        if self.is_train and self.num_views > 1:
            imgs_achw = torch.stack(proc_imgs, dim=0)
            return imgs_achw, proc_tgts, index
        return proc_imgs[0], proc_tgts[0], index

    # --------------------------------------------------------------------- #
    # Helpers
    # --------------------------------------------------------------------- #
    @staticmethod
    def _sanitize_boxlist(bl: BoxList, w: int, h: int) -> BoxList:
        """
        Clip *bl* to the image and drop NaN/Inf or degenerate (<= 1 px) boxes,
        keeping negative samples (empty BoxLists) intact.

        Boolean-mask indexing is used even when no box survives: unlike
        constructing a fresh empty BoxList, it preserves ALL attached fields
        (``labels`` *and* ``difficult``) with consistent zero length.

        Args:
            bl: input boxes (mode 'xyxy').
            w, h: current image width/height (bl.size is expected to match
                after clipping; kept for call-site symmetry).
        """
        bl = bl.clip_to_image(remove_empty=False)
        bbox = bl.bbox
        if bbox.numel() == 0:
            # Already empty; any attached fields are already zero-length.
            return bl

        # Some boxes may contain NaN/Inf; filter those first.
        finite_mask = torch.isfinite(bbox).all(dim=1)

        TO_REMOVE = 1  # matches BoxList's internal width/height convention
        ws = bbox[:, 2] - bbox[:, 0] + TO_REMOVE
        hs = bbox[:, 3] - bbox[:, 1] + TO_REMOVE
        size_ok = (ws > 1) & (hs > 1)

        # Works for an all-False mask too: yields a (0, 4) BoxList whose
        # fields are sliced to length 0, so no field is silently lost.
        return bl[finite_mask & size_ok]

    def _make_views(self, image: Image.Image, target: BoxList) -> Tuple[List[Image.Image], List[BoxList]]:
        """
        Produce A views: the original image plus (A-1) corruption views
        (training only). Corruptions are photometric, so the target is shared;
        any geometric augmentation must also transform the boxes inside ACVC.
        """
        views = [image]
        targets = [target]
        if self.is_train and self.num_views > 1:
            # Randomly pick A-1 corruptions, each with a random severity in
            # 1..5 (same policy as multi_weather).
            augs_needed = self.num_views - 1
            pool = list(self.corruption_func)
            # Sample with replacement only when the pool is too small.
            choices = (
                random.sample(pool, augs_needed) if len(pool) >= augs_needed
                else [random.choice(pool) for _ in range(augs_needed)]
            )
            for c in choices:
                s = random.randint(1, 5)
                aug_img = self.acvc.apply_corruption(image.copy(), c, s).convert("RGB")
                views.append(aug_img)
                targets.append(target)  # geometry unchanged -> shared boxes
        return views, targets

    def get_img_info(self, index: int):
        """Return {'height': H, 'width': W}, reading the XML <size> if present
        and falling back to opening the image."""
        rel = self.ids[index]
        xml_path = os.path.join(self.label_root, os.path.splitext(rel)[0] + ".xml")
        anno = ET.parse(xml_path).getroot()
        size = anno.find("size")
        if size is not None:
            h = int(size.find("height").text)
            w = int(size.find("width").text)
        else:
            w, h = Image.open(os.path.join(self.img_root, rel)).size
        return {"height": h, "width": w}

    def map_class_id_to_class_name(self, class_id: int) -> str:
        return self.CLASSES[class_id]

    def _load_annotation(self, xml_path: str) -> BoxList:
        """
        Parse a VOC XML into a BoxList with (N, 4) boxes (N may be 0).

        Objects flagged ``difficult`` are skipped unless ``keep_difficult``;
        unknown class names are ignored. Coordinates are converted from VOC's
        1-based pixels to 0-based.
        """
        anno = ET.parse(xml_path).getroot()
        boxes = []
        labels = []
        difficults = []
        TO_REMOVE = 1

        for obj in anno.iter("object"):
            diffc = obj.find("difficult")
            difficult = 0 if diffc is None else int(diffc.text)
            if not self.keep_difficult and difficult:
                continue

            name = obj.find("name").text.lower().strip()
            if name not in self.class_to_ind:
                # Ignore categories outside CLASSES.
                continue

            bb = obj.find("bndbox")
            # VOC -> float -> 0-based.
            xmin = float(bb.find("xmin").text) - TO_REMOVE
            ymin = float(bb.find("ymin").text) - TO_REMOVE
            xmax = float(bb.find("xmax").text) - TO_REMOVE
            ymax = float(bb.find("ymax").text) - TO_REMOVE
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(self.class_to_ind[name])
            difficults.append(difficult)

        # Image size: prefer the XML <size>, fall back to the image file.
        size = anno.find("size")
        if size is not None:
            W = int(size.find("width").text)
            H = int(size.find("height").text)
        else:
            rel = os.path.splitext(os.path.relpath(xml_path, self.label_root))[0] + ".jpg"
            W, H = Image.open(os.path.join(self.img_root, rel)).size

        # Assemble the BoxList; an explicit (0, 4) tensor keeps the shape valid
        # for negative samples.
        if len(boxes) == 0:
            boxes_tensor = torch.zeros((0, 4), dtype=torch.float32)
        else:
            boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).view(-1, 4)

        target = BoxList(boxes_tensor, (W, H), mode="xyxy")
        target.add_field("labels", torch.as_tensor(labels, dtype=torch.int64))
        target.add_field("difficult", torch.as_tensor(difficults, dtype=torch.uint8))
        return target

    def get_groundtruth(self, index: int) -> BoxList:
        """
        Return the full ground truth for evaluation (mirrors PascalVOCDataset).

        Unlike ``_load_annotation``, difficult objects are ALWAYS included and
        only flagged via the ``difficult`` field, so the evaluator can decide
        how to score them.
        """
        img_id = self.ids[index]
        xml_path = os.path.join(self.label_root, os.path.splitext(img_id)[0] + ".xml")
        anno = ET.parse(xml_path).getroot()

        # Image size, with the same fallback as _load_annotation (previously
        # this crashed when <size> was absent).
        size = anno.find("size")
        if size is not None:
            height = int(size.find("height").text)
            width = int(size.find("width").text)
        else:
            width, height = Image.open(os.path.join(self.img_root, img_id)).size

        boxes = []
        labels = []
        difficults = []
        for obj in anno.iter("object"):
            name = obj.find("name").text.lower().strip()
            if name not in self.class_to_ind:
                # Previously an unknown class raised KeyError; skip instead,
                # consistent with _load_annotation.
                continue
            diffc = obj.find("difficult")
            # Previously a missing <difficult> tag raised AttributeError.
            difficult = 0 if diffc is None else int(diffc.text)
            bb = obj.find("bndbox")

            # VOC 1-based -> 0-based (xmin, ymin, xmax, ymax).
            xmin = float(bb.find("xmin").text) - 1
            ymin = float(bb.find("ymin").text) - 1
            xmax = float(bb.find("xmax").text) - 1
            ymax = float(bb.find("ymax").text) - 1

            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(self.class_to_ind[name])
            difficults.append(difficult)

        # view(-1, 4) guarantees a 2-D (N, 4) tensor even when N == 0.
        boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).view(-1, 4)

        target = BoxList(boxes_tensor, (width, height), mode="xyxy")
        target.add_field("labels", torch.as_tensor(labels, dtype=torch.int64))
        target.add_field("difficult", torch.as_tensor(difficults, dtype=torch.uint8))
        return target


