import torch
import torch.nn.functional as F
from typing import Dict, List, Optional, Tuple, Any


class DistillationLoss(torch.nn.Module):
    """Frame-attn, global-attn and geometry (depth/points) distillation losses.

    Aligns selected attention maps and geometry outputs of a student model
    with those of a teacher. Instantiate with the layer indices to supervise
    and per-group weights, then call with the two output dicts.

    Args:
        frame_layers: layer indices whose per-frame attention maps to align.
        global_layers: layer indices whose global attention maps to align.
        w_frame: weight of the summed frame-attention term in the objective.
        w_global: weight of the summed global-attention term.
        w_geo: weight of the summed geometry (depth + points) term.
        attn_loss_type: "mse" (elementwise) or "kl" (softmax over last dim)
            for the attention terms.
    """

    def __init__(
        self,
        frame_layers: Optional[List[int]] = None,
        global_layers: Optional[List[int]] = None,
        w_frame: float = 1.0,
        w_global: float = 1.0,
        w_geo: float = 1.0,
        attn_loss_type: str = "mse",  # "mse" or "kl"
    ) -> None:
        super().__init__()
        self.frame_layers = frame_layers or []
        self.global_layers = global_layers or []
        self.w_frame = w_frame
        self.w_global = w_global
        self.w_geo = w_geo
        self.attn_loss_type = attn_loss_type

    def forward(self, student_outputs: Dict[str, Any], teacher_outputs: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """Compute all distillation terms.

        Either dict may contain:
        - "frame_attn": dict[layer] -> tensor (B, S, P, P) or (S, P, P)
        - "global_attn": dict[layer] -> tensor (B, S*P, S*P)
        - "depth", "points": geometry tensors

        Missing keys or layers on either side are skipped silently; the
        corresponding term simply contributes 0.

        Returns:
            Dict with per-layer losses ("loss_frame_layer{i}",
            "loss_global_layer{i}"), per-output geometry losses
            ("loss_geo_depth", "loss_geo_points"), group sums
            ("loss_frame", "loss_global", "loss_geo"), and the weighted
            total under "objective". NOTE: a group sum is a plain Python
            0.0 (not a tensor) when no matching pair was found, matching
            the original accumulator initialization.
        """
        loss_dict: Dict[str, torch.Tensor] = {}

        frame_loss = self._group_attn_loss(
            student_outputs, teacher_outputs, "frame_attn", self.frame_layers, "frame", loss_dict
        )
        global_loss = self._group_attn_loss(
            student_outputs, teacher_outputs, "global_attn", self.global_layers, "global", loss_dict
        )
        geo_loss = self._geometry_loss(student_outputs, teacher_outputs, loss_dict)

        loss_dict["loss_frame"] = frame_loss
        loss_dict["loss_global"] = global_loss
        loss_dict["loss_geo"] = geo_loss
        loss_dict["objective"] = (
            self.w_frame * frame_loss + self.w_global * global_loss + self.w_geo * geo_loss
        )
        return loss_dict

    def _group_attn_loss(
        self,
        student_outputs: Dict[str, Any],
        teacher_outputs: Dict[str, Any],
        key: str,
        layers: List[int],
        tag: str,
        loss_dict: Dict[str, torch.Tensor],
    ):
        """Sum per-layer attention losses for one group, recording each term.

        Records each layer's loss under f"loss_{tag}_layer{layer}" and
        returns the group sum (0.0 if no layer matched).
        """
        total = 0.0
        for layer in layers:
            # .get on BOTH sides: a teacher dict missing `key` entirely is
            # treated like a missing layer (skipped) instead of raising
            # KeyError, mirroring the student-side handling.
            t_attn = self._get_tensor(teacher_outputs.get(key, {}), layer)
            s_attn = self._get_tensor(student_outputs.get(key, {}), layer)
            if t_attn is None or s_attn is None:
                continue
            layer_loss = self._attn_loss(s_attn, t_attn)
            loss_dict[f"loss_{tag}_layer{layer}"] = layer_loss
            total += layer_loss
        return total

    def _geometry_loss(
        self,
        student_outputs: Dict[str, Any],
        teacher_outputs: Dict[str, Any],
        loss_dict: Dict[str, torch.Tensor],
    ):
        """L1 losses on depth and points; records each term, returns the sum."""
        total = 0.0

        t_depth = teacher_outputs.get("depth")
        s_depth = student_outputs.get("depth")
        if t_depth is not None and s_depth is not None:
            depth_loss = F.l1_loss(s_depth, t_depth)
            loss_dict["loss_geo_depth"] = depth_loss
            total += depth_loss

        t_points = teacher_outputs.get("points")
        s_points = student_outputs.get("points")
        if t_points is not None and s_points is not None:
            if t_points.shape != s_points.shape:
                # Last-resort comparison: flatten each batch item. reshape()
                # rather than view() so non-contiguous tensors don't raise.
                # Still requires equal element counts per batch item.
                tflat = t_points.reshape(t_points.shape[0], -1)
                sflat = s_points.reshape(s_points.shape[0], -1)
                points_loss = F.l1_loss(sflat, tflat)
            else:
                points_loss = F.l1_loss(s_points, t_points)
            loss_dict["loss_geo_points"] = points_loss
            total += points_loss

        return total

    def _get_tensor(self, attn_dict: Optional[Dict[int, torch.Tensor]], L: int) -> Optional[torch.Tensor]:
        """Fetch layer L's tensor from an attention dict; None if absent."""
        if attn_dict is None:
            return None
        return attn_dict.get(L)

    def _attn_loss(self, s: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        """Alignment loss between student `s` and teacher `t` attention.

        Shapes may be (B,S,P,P), (S,P,P) or (B,SP,SP) but must match.
        "kl" treats the last dim as logits. (The former `s + eps` before
        log_softmax was a no-op — softmax is shift-invariant — so it has
        been removed; F.log_softmax is already numerically stable.)
        NOTE(review): if inputs are already softmaxed attention maps,
        re-softmaxing here flattens them — confirm against model outputs.
        """
        if self.attn_loss_type == "kl":
            s_log = F.log_softmax(s, dim=-1)
            t_soft = F.softmax(t, dim=-1)
            # batchmean: KL summed over non-batch dims, averaged over dim 0.
            return F.kl_div(s_log, t_soft, reduction="batchmean")
        return F.mse_loss(s, t)
