# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.structures.image_list import to_image_list

class ImageList5D(object):
    """Lightweight analogue of ImageList for 5D batches shaped (B, A, C, H, W).

    Attributes:
        tensors: the stacked image tensor, (B, A, C, H, W).
        image_sizes_ba: per-sample, per-view original sizes, list[list[(H, W)]].
        image_sizes: per-sample size of the first view, kept for compatibility
            with code that expects the 4D ImageList interface.
    """

    def __init__(self, tensors, image_sizes_ba):
        self.tensors = tensors
        self.image_sizes_ba = image_sizes_ba
        if image_sizes_ba:
            # Expose the first view's size per sample, ImageList-style.
            self.image_sizes = [per_view[0] for per_view in image_sizes_ba]
        else:
            self.image_sizes = []

    def to(self, device, non_blocking=False):
        # Return a new wrapper; size metadata is device-independent.
        moved = self.tensors.to(device, non_blocking=non_blocking)
        return ImageList5D(moved, self.image_sizes_ba)

    def __len__(self):
        # Batch dimension B.
        return self.tensors.size(0)

class BatchCollator(object):
    """Collates dataset samples into ``(images, targets, ids)``.

    Two dataset image layouts are supported:
      * single-view: one Tensor(C, H, W) per sample — delegated to
        ``to_image_list`` and wrapped as an ``ImageList5D`` with A=1;
      * multi-view: Tensor(A, C, H, W) or a list/tuple of A Tensor(C, H, W)
        per sample — padded to a common size and stacked into (B, A, C, H, W).
    """

    def __init__(self, size_divisible=0):
        # When > 0, padded H/W are rounded up to a multiple of this stride
        # (e.g. required by FPN-style backbones).
        self.size_divisible = size_divisible

    def __call__(self, batch):
        """batch: list of per-sample tuples ``(images, targets, ids)``."""
        transposed_batch = list(zip(*batch))
        images, targets, ids = transposed_batch[:3]

        first = images[0]
        # Multi-view if the dataset yields Tensor(A, C, H, W) ...
        is_mv_tensor = torch.is_tensor(first) and first.dim() == 4
        # ... or a non-empty list/tuple of Tensor(C, H, W).
        is_mv_list = isinstance(first, (list, tuple)) and (
            len(first) > 0 and torch.is_tensor(first[0]) and first[0].dim() == 3
        )

        if is_mv_tensor or is_mv_list:
            images = self._collate_multi_view(images)
        else:
            images = self._collate_single_view(images)
        return images, targets, ids

    def _collate_multi_view(self, images):
        """Pad per-sample view tensors to a common H/W and stack them into an
        ``ImageList5D`` of shape (B, A, C, H_max, W_max)."""
        views_list = []
        sizes_list = []  # per-sample list of original (H, W), one entry per view
        num_views = None
        num_channels = None
        h_max = 0
        w_max = 0

        # Normalize every sample to a Tensor(A, C, H, W) and record sizes.
        for x in images:
            if isinstance(x, (list, tuple)):
                # Stacking requires all views of a sample to share H and W.
                x = torch.stack(x, dim=0)  # (A, C, H, W)
            assert x.dim() == 4, f"Expect (A,C,H,W), got {tuple(x.shape)}"
            if num_views is None:
                num_views, num_channels = x.size(0), x.size(1)
            else:
                # A and C must agree across the batch.
                assert x.size(0) == num_views and x.size(1) == num_channels, \
                    "Inconsistent A/C among samples"

            h_max = max(h_max, int(x.size(2)))
            w_max = max(w_max, int(x.size(3)))
            # All A views of a stacked sample share the same (H, W).
            sizes_list.append([(int(x.size(2)), int(x.size(3)))] * x.size(0))
            views_list.append(x)

        # Round the padded size up to size_divisible (matches to_image_list).
        if self.size_divisible > 0:
            stride = int(self.size_divisible)
            h_max = (h_max + stride - 1) // stride * stride
            w_max = (w_max + stride - 1) // stride * stride

        # Zero-pad each sample on the bottom/right to (A, C, h_max, w_max).
        padded = []
        for x in views_list:
            pad_h = h_max - x.size(2)
            pad_w = w_max - x.size(3)
            if pad_h or pad_w:
                x = torch.nn.functional.pad(x, (0, pad_w, 0, pad_h))
            padded.append(x)

        return ImageList5D(torch.stack(padded, dim=0), sizes_list)

    def _collate_single_view(self, images):
        """Collate Tensor(C, H, W) samples via ``to_image_list`` and wrap the
        result as an ``ImageList5D`` with a single view (A=1)."""
        imglist = to_image_list(images, self.size_divisible)  # ImageList, (B,C,H,W)
        t = imglist.tensors
        assert t.dim() == 4, f"Expect (B,C,H,W), got {tuple(t.shape)}"
        t5 = t.unsqueeze(1)  # (B, 1, C, H, W)
        # One view per sample, so each entry is a single-element list.
        image_sizes_ba = [[tuple(size)] for size in imglist.image_sizes]
        return ImageList5D(t5, image_sizes_ba)


class BBoxAugCollator(object):
    """Transposes a list of dataset samples into per-field tuples.

    No batching of image tensors happens here; that is deferred to
    `im_detect_bbox_aug`.
    """

    def __call__(self, batch):
        columns = zip(*batch)
        return [column for column in columns]

