# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from .box_head.box_head import build_roi_box_head
from .mask_head.mask_head import build_roi_mask_head
from .keypoint_head.keypoint_head import build_roi_keypoint_head


def _gather_regression_by_class(reg, probs):
    """
    reg: (N, 4) if class-agnostic, or (N, 4*K) if class-specific
    probs: (N, K) softmaxed probabilities from the anchor view
    returns: (N, 4) selected regression for each ROI
    """
    if reg.dim() != 2:
        raise ValueError("bbox regression should be 2D")
    N, D = reg.shape
    if N == 0: # 空 proposals，直接返回空
        return reg
    K = probs.shape[1]
    if D == 4: # class-agnostic
        return reg
    # class-specific (N, 4K) -> pick by argmax class from anchor view
    reg = reg.view(N, K, 4)
    cls_idx = probs.argmax(dim=1)  # (N,)
    arange = torch.arange(N, device=reg.device)
    return reg[arange, cls_idx, :]  # (N, 4)


def compute_align_losses(cfg, roi_box_head, proposals, features_views, proposals_views=None):
    """
    proposals: list[BoxList] proposals from anchor view (length = batch)
    features_views: list of feature dicts from backbone for each view, len A >= 2
        features_views[0] is anchor view features (same as used for proposals)
    roi_box_head: the instantiated ROIBoxHead (roi_heads.box)

    Returns: dict with {"loss_align_cls": tensor, "loss_align_reg": tensor}
    """
    if (not cfg.MODEL.DIVALIGN.ENABLE) or (features_views is None) or (len(features_views) <= 1):
        return {}

    alpha = cfg.MODEL.DIVALIGN.ALPHA
    beta  = cfg.MODEL.DIVALIGN.BETA
    use_sym_kl = getattr(cfg.MODEL.DIVALIGN, "USE_SYMMETRIC_KL", False)

    # 1) anchor-view logits/reg
    x_anchor = roi_box_head.feature_extractor(features_views[0], proposals)
    class_logits_anchor, box_reg_anchor = roi_box_head.predictor(x_anchor)
    # 若本 step 没有任何 ROI（N=0），直接返回 0 损失，避免空均值 -> NaN
    N_anchor = class_logits_anchor.shape[0]
    if N_anchor == 0:
        zero = class_logits_anchor.new_tensor(0.0)
        return {"loss_align_cls": zero, "loss_align_reg": zero}
    p_anchor = torch.softmax(class_logits_anchor, dim=1).clamp(min=1e-6)

    # 数值健壮性
    if (not torch.isfinite(p_anchor).all()) or (not torch.isfinite(box_reg_anchor).all()):
        zero = class_logits_anchor.new_tensor(0.0)
        return {"loss_align_cls": zero, "loss_align_reg": zero}
    
    # 2) other views: reuse SAME proposals, but pool on their feature maps
    l_cal = class_logits_anchor.new_tensor(0.0)
    l_ral = class_logits_anchor.new_tensor(0.0)
    num_pairs = 0

    # --- Other views ---
    for v in range(1, len(features_views)):
        pv = None
        if proposals_views is not None and v < len(proposals_views) and proposals_views[v] is not None:
            pv = proposals_views[v]
        if pv is None:
            pv = proposals
        # 提取本视图 ROI 特征并预测
        x_v = roi_box_head.feature_extractor(features_views[v], proposals)
        class_logits_v, box_reg_v = roi_box_head.predictor(x_v)
        # 与 anchor 一致的空检查
        if class_logits_v.shape[0] == 0:
            continue
        N = min(N_anchor, class_logits_v.shape[0])
        class_logits_v = class_logits_v[:N]
        box_reg_v = box_reg_v[:N]
        p_v = torch.softmax(class_logits_v, dim=1).clamp(min=1e-6)
        # 数值健壮性检查
        if (not torch.isfinite(p_v).all()) or (not torch.isfinite(box_reg_v).all()):
            continue
        # 同步截断 anchor 到同样长度 N
        p_anchor_N = p_anchor[:N]
        box_reg_anchor_N = box_reg_anchor[:N]
        # KL(p_anchor || p_v)
        if use_sym_kl:
            l_cal = l_cal + 0.5 * (
                F.kl_div(p_anchor_N.log(), p_v, reduction="batchmean") +
                F.kl_div(p_v.log(), p_anchor_N, reduction="batchmean")
            )
        else:
            l_cal = l_cal + F.kl_div(p_anchor_N.log(), p_v, reduction="batchmean")

        # L2 on bbox regression (class-agnostic or gather by argmax of anchor probs)
        sel_anchor = _gather_regression_by_class(box_reg_anchor_N, p_anchor_N)
        sel_v      = _gather_regression_by_class(box_reg_v,      p_anchor_N)
        # 空张量/非法数值兜底，避免 mse(mean over empty)=nan
        if sel_anchor.numel() == 0 or sel_v.numel() == 0:
            continue
        if (not torch.isfinite(sel_anchor).all()) or (not torch.isfinite(sel_v).all()):
            continue
        l_ral = l_ral + F.mse_loss(sel_anchor, sel_v, reduction="mean")
        num_pairs += 1

    if num_pairs == 0:
        # 没有有效视图对，返回 0
        zero = class_logits_anchor.new_tensor(0.0)
        return {"loss_align_cls": zero, "loss_align_reg": zero}

    l_cal = l_cal / num_pairs
    l_ral = l_ral / num_pairs

    return {
        "loss_align_cls": alpha * l_cal,
        "loss_align_reg": beta  * l_ral,
    }


class CombinedROIHeads(torch.nn.ModuleDict):
    """
    Combines a set of individual heads (for box prediction or masks) into a single
    head.
    """

    def __init__(self, cfg, heads):
        super(CombinedROIHeads, self).__init__(heads)
        self.cfg = cfg.clone()
        # Share the box feature extractor with the mask/keypoint heads when
        # configured, so pooled ROI features are computed only once.
        if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.mask.feature_extractor = self.box.feature_extractor
        if cfg.MODEL.KEYPOINT_ON and cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.keypoint.feature_extractor = self.box.feature_extractor

    def forward(self, features, proposals, targets=None, **kwargs):
        """
        Run the box head (and mask/keypoint heads when enabled), plus the
        optional DivAlign alignment losses during training.

        Recognized kwargs:
            features_views: optional per-view backbone features for DivAlign.
            proposals_views: optional per-view proposals for DivAlign.

        Returns:
            (x, detections, losses) — the last head's features, the detections
            (unaltered proposals during training), and the merged loss dict.
        """
        losses = {}
        x, detections, loss_box = self.box(features, proposals, targets)
        losses.update(loss_box)

        if self.cfg.MODEL.MASK_ON:
            mask_features = features
            # optimization: during training, if we share the feature extractor between
            # the box and the mask heads, then we can reuse the features already computed
            if (
                self.training
                and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
            ):
                mask_features = x
            # During training, self.box() will return the unaltered proposals as "detections"
            # this makes the API consistent during training and testing
            x, detections, loss_mask = self.mask(mask_features, detections, targets)
            losses.update(loss_mask)

        if self.cfg.MODEL.KEYPOINT_ON:
            keypoint_features = features
            # same feature-sharing optimization as for the mask head
            if (
                self.training
                and self.cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR
            ):
                keypoint_features = x
            x, detections, loss_keypoint = self.keypoint(keypoint_features, detections, targets)
            losses.update(loss_keypoint)

        # Alignment losses are a training-only regularizer; previously they
        # were computed (and then discarded) in eval mode too, wasting one
        # extra box-head forward pass per view at inference time.
        if self.training:
            features_views = kwargs.get("features_views", None)
            proposals_views = kwargs.get("proposals_views", None)
            try:
                align_losses = compute_align_losses(
                    self.cfg, self.box, proposals, features_views, proposals_views
                )
                losses.update(align_losses)
            except Exception as e:
                # Best-effort: an alignment failure must not kill the main
                # task. NOTE(review): prefer logging over print here.
                if self.cfg.MODEL.DIVALIGN.ENABLE:
                    print(f"[DivAlign] align loss computation failed: {e}")
        return x, detections, losses


def build_roi_heads(cfg, in_channels):
    """
    Build the ROI heads enabled by the config and combine them into a single
    module. Returns an empty list when no ROI head is needed.
    """
    # RetinaNet is a dense (single-stage) detector and uses no ROI heads.
    if cfg.MODEL.RETINANET_ON:
        return []

    # Create each enabled head individually; they are merged below.
    heads = []
    if not cfg.MODEL.RPN_ONLY:
        heads.append(("box", build_roi_box_head(cfg, in_channels)))
    if cfg.MODEL.MASK_ON:
        heads.append(("mask", build_roi_mask_head(cfg, in_channels)))
    if cfg.MODEL.KEYPOINT_ON:
        heads.append(("keypoint", build_roi_keypoint_head(cfg, in_channels)))

    # Wrap the individual heads in one combined module when any exist.
    return CombinedROIHeads(cfg, heads) if heads else heads
