
import os
import xml.etree.ElementTree as ET

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

# Required by FLIRPairDataset's augmentation pipeline (A.Compose / ToTensorV2
# were referenced below without ever being imported).
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Dataset splits accepted by the loaders below.
SPLIT = ["train", "test"]  

class FLIRPairDataset(Dataset):
    """Paired visible + infrared dataset returning a 4-channel (RGB+IR) image
    and YOLO-format targets.

    Expected layout under ``root_dir``::

        visible/<split>/*.jpg|jpeg|png   3-channel visible images
        infrared/<split>/*.jpg|jpeg|png  1-channel infrared images
        labels/<split>/*.txt             YOLO labels: "cls cx cy w h" per line

    Samples are matched across the three directories by file basename.
    Only paths are indexed at construction time; images are loaded lazily
    in ``__getitem__``.
    """

    # Extensions accepted when indexing the image directories.
    _IMG_EXTS = (".jpg", ".jpeg", ".png")

    def __init__(
        self,
        root_dir: str,
        split: str = "train",
        img_size: int = 640,
        augment: bool = True,
    ):
        """
        Args:
            root_dir: dataset root containing visible/, infrared/, labels/.
            split:    one of SPLIT ("train" / "test").
            img_size: side length of the square output image.
            augment:  enable augmentation (only honored for the train split).
        """
        assert split in SPLIT, f"split must be one of {SPLIT}"
        self.root = root_dir
        self.split = split
        self.img_size = img_size
        # Augmentation is only ever applied to the training split.
        self.augment = augment and (split == "train")

        vis_dir = os.path.join(root_dir, "visible", split)
        ir_dir = os.path.join(root_dir, "infrared", split)
        label_dir = os.path.join(root_dir, "labels", split)

        # 1-2. Index images and labels by basename (without extension).
        vis_img_names = self._index_files(vis_dir, self._IMG_EXTS)
        ir_img_names = self._index_files(ir_dir, self._IMG_EXTS)
        label_names = self._index_files(label_dir, (".txt",))

        # 3. Keep only samples for which both modalities exist.
        self.samples = []
        for lbl_base, lbl_path in label_names.items():
            vis_path = vis_img_names.get(lbl_base)
            ir_path = ir_img_names.get(lbl_base)
            if vis_path and ir_path:
                self.samples.append((vis_path, ir_path, lbl_path))
            else:
                print(f"[WARN] missing images for label: {lbl_base}")

        self.transform = self._build_transform(img_size, self.augment)

    @staticmethod
    def _index_files(directory, extensions):
        """Map basename (no extension) -> full path for matching files."""
        return {
            os.path.splitext(f)[0]: os.path.join(directory, f)
            for f in os.listdir(directory)
            if f.lower().endswith(extensions)
        }

    @staticmethod
    def _build_transform(img_size, augment):
        """Build the Albumentations pipeline.

        ``additional_targets={"ir": "image"}`` is required so the infrared
        image passed as ``ir=`` receives the same (geometric) transforms as
        the visible image — without it, ``transform(image=..., ir=...)``
        raises at call time.
        """
        steps = [
            A.LongestMaxSize(max_size=img_size),
            A.PadIfNeeded(
                min_height=img_size,
                min_width=img_size,
                border_mode=cv2.BORDER_CONSTANT,
                value=0,
            ),
        ]
        if augment:
            steps += [
                A.HorizontalFlip(p=0.5),
                # NOTE(review): ColorJitter/GaussNoise are also applied to the
                # 1-channel IR target; confirm the installed albumentations
                # version accepts single-channel input for these transforms.
                A.ColorJitter(
                    brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=0.5
                ),
                A.GaussNoise(var_limit=(0.0, 10.0), p=0.3),
            ]
        steps.append(ToTensorV2())
        return A.Compose(
            steps,
            bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"]),
            additional_targets={"ir": "image"},
        )

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        """Load, transform and fuse one visible/infrared pair.

        Only paths were stored in ``__init__``; the actual pixel data is
        loaded here.

        Returns:
            dict with
              "image": (4, H, W) tensor — RGB + IR channels (pixel values are
                       NOT scaled to [0, 1]; ToTensorV2 keeps the input range)
              "label": (N, 5) float32 tensor of [cls, cx, cy, w, h] (YOLO)
        """
        vis_path, ir_path, lbl_path = self.samples[idx]

        # Visible: BGR -> RGB. imread returns None on failure, so fail loudly.
        vis = cv2.imread(vis_path)
        if vis is None:
            raise FileNotFoundError(f"cannot read visible image: {vis_path}")
        # ::-1 produces a negative-stride view; make it contiguous for cv2/albumentations.
        vis = np.ascontiguousarray(vis[:, :, ::-1])

        # Infrared: single channel, kept as (H, W, 1).
        ir = cv2.imread(ir_path, cv2.IMREAD_GRAYSCALE)
        if ir is None:
            raise FileNotFoundError(f"cannot read infrared image: {ir_path}")
        ir = np.expand_dims(ir, axis=-1)

        # YOLO labels: one "cls cx cy w h" per line.
        bboxes = []
        labels = []
        with open(lbl_path, "r") as f:
            for line in f:
                cls, cx, cy, w, h = map(float, line.split())
                bboxes.append([cx, cy, w, h])
                labels.append(int(cls))

        transformed = self.transform(
            image=vis, ir=ir, bboxes=bboxes, class_labels=labels
        )
        vis = transformed["image"]  # (3, H, W) after ToTensorV2
        ir = transformed["ir"]
        # ToTensorV2 normally moves additional image targets to CHW as well;
        # only permute if the tensor is still channel-last (version-dependent).
        if ir.shape[0] != 1:
            ir = ir.permute(2, 0, 1)  # (1, H, W)

        # Fuse into a single 4-channel tensor.
        image = torch.cat([vis, ir], dim=0)  # (4, H, W)

        # Target tensor: (N, 5) -> [cls, cx, cy, w, h]. The class column is
        # cast to float so both branches yield float32 (torch.cat of long and
        # float tensors is rejected by older torch versions).
        if len(transformed["bboxes"]):
            bboxes_t = torch.tensor(transformed["bboxes"], dtype=torch.float32)
            labels_t = torch.tensor(transformed["class_labels"], dtype=torch.float32)
            targets = torch.cat([labels_t.unsqueeze(1), bboxes_t], dim=1)
        else:
            targets = torch.empty((0, 5), dtype=torch.float32)

        return {"image": image, "label": targets}


class FLIR_ImagePair_Dataset(Dataset):
    """Paired visible/infrared dataset without augmentation.

    Returns the two modalities separately (letterboxed to ``img_size`` and
    scaled to [0, 1]) plus the raw YOLO labels.

    Expected layout under ``root_dir``::

        visible/<split>/*.jpg|jpeg|png
        infrared/<split>/*.jpg|jpeg|png
        labels/<split>/*.txt   YOLO labels: "cls cx cy w h" per line
    """

    def __init__(self, root_dir: str, split: str = "train", img_size: int = 640):
        """
        Args:
            root_dir: dataset root containing visible/, infrared/, labels/.
            split:    one of SPLIT ("train" / "test").
            img_size: side length of the square output image.
        """
        assert split in SPLIT, f"split must be one of {SPLIT}"
        self.root = root_dir
        self.split = split
        self.img_size = img_size

        # Sub-directories for each modality and the labels.
        vis_dir = os.path.join(root_dir, "visible", split)
        ir_dir = os.path.join(root_dir, "infrared", split)
        label_dir = os.path.join(root_dir, "labels", split)

        # 1. Index visible/infrared images by basename (no extension).
        vis_img = {
            os.path.splitext(f)[0]: os.path.join(vis_dir, f)
            for f in os.listdir(vis_dir)
            if f.lower().endswith((".jpg", ".jpeg", ".png"))
        }
        ir_img = {
            os.path.splitext(f)[0]: os.path.join(ir_dir, f)
            for f in os.listdir(ir_dir)
            if f.lower().endswith((".jpg", ".jpeg", ".png"))
        }

        # 2. Index label files.
        labels = {
            os.path.splitext(f)[0]: os.path.join(label_dir, f)
            for f in os.listdir(label_dir)
            if f.endswith(".txt")
        }

        # 3. Keep only fully matched (visible, infrared, label) triples.
        self.samples = []
        for key, lbl_path in labels.items():
            if key in vis_img and key in ir_img:
                self.samples.append((vis_img[key], ir_img[key], lbl_path))
            else:
                print(f"[WARN] missing images for label: {key}")

    def __len__(self):
        return len(self.samples)

    @staticmethod
    def _resize_pad(im: np.ndarray, size: int) -> np.ndarray:
        """Letterbox *im* onto a (size, size, C) float32 canvas.

        The image is scaled to fit its longest side into ``size``, then
        centered on a zero-filled square canvas. 2-D (grayscale) input gets
        a trailing channel axis. Pixel values keep their input range — no
        normalization happens here.
        """
        # Ensure (H, W, C) up front so both paths below see 3-D input.
        if im.ndim == 2:
            im = im[..., None]
        h, w = im.shape[:2]

        # Fast path: already the target size — only the dtype needs fixing.
        if h == size and w == size:
            return im.astype(np.float32)

        scale = min(size / h, size / w)
        new_h, new_w = int(h * scale), int(w * scale)

        resized = cv2.resize(im, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        if resized.ndim == 2:
            # cv2.resize drops a singleton channel axis; restore it.
            resized = resized[..., None]

        canvas = np.zeros((size, size, im.shape[-1]), dtype=np.float32)
        y0 = (size - new_h) // 2
        x0 = (size - new_w) // 2
        canvas[y0:y0 + new_h, x0:x0 + new_w] = resized
        return canvas

    def __getitem__(self, idx):
        """Load one pair.

        Returns:
            dict with
              "vis":   (3, Sz, Sz) float tensor in [0, 1]
              "ir":    (1, Sz, Sz) float tensor in [0, 1]
              "label": (N, 5) float32 tensor of [cls, cx, cy, w, h]
        """
        vis_path, ir_path, lbl_path = self.samples[idx]

        # imread returns None on failure; fail loudly instead of crashing later.
        vis = cv2.imread(vis_path)
        if vis is None:
            raise FileNotFoundError(f"cannot read visible image: {vis_path}")
        # ::-1 yields a negative-stride view; make it contiguous (BGR -> RGB).
        vis = np.ascontiguousarray(vis[:, :, ::-1])  # (H, W, 3)

        ir = cv2.imread(ir_path, cv2.IMREAD_GRAYSCALE)  # (H, W)
        if ir is None:
            raise FileNotFoundError(f"cannot read infrared image: {ir_path}")
        ir = ir[..., None]  # (H, W, 1)

        # Letterbox both modalities to the square output size.
        vis = self._resize_pad(vis, self.img_size)  # (Sz, Sz, 3)
        ir = self._resize_pad(ir, self.img_size)    # (Sz, Sz, 1)

        # HWC -> CHW, scale to [0, 1].
        vis = torch.from_numpy(vis).permute(2, 0, 1).float() / 255.0
        ir = torch.from_numpy(ir).permute(2, 0, 1).float() / 255.0

        # Read YOLO labels.
        # NOTE(review): the normalized box coordinates are NOT remapped to the
        # letterboxed image — confirm downstream consumers expect coordinates
        # in the original frame.
        bboxes, labels = [], []
        with open(lbl_path) as f:
            for line in f:
                parts = line.split()
                if not parts:  # tolerate blank lines
                    continue
                cls, cx, cy, w, h = map(float, parts)
                bboxes.append([cx, cy, w, h])
                labels.append(int(cls))

        if bboxes:
            # Class column as float so the cat is homogeneous float32
            # (mixing long and float tensors breaks on older torch).
            targets = torch.cat(
                [
                    torch.tensor(labels, dtype=torch.float32)[:, None],
                    torch.tensor(bboxes, dtype=torch.float32),
                ],
                dim=1,
            )  # (N, 5)
        else:
            # Empty label file: the original torch.cat crashed here because
            # torch.tensor([]) is 1-D. Mirror FLIRPairDataset's behavior.
            targets = torch.empty((0, 5), dtype=torch.float32)

        return {"vis": vis, "ir": ir, "label": targets}