"""Lightweight aggregator with reduced alternating attention depth."""

from __future__ import annotations

from functools import partial
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

from vggt.layers import MemEffAttention
from vggt.layers.block import Block
from vggt.layers.rope import PositionGetter, RotaryPositionEmbedding2D
from vggt.layers.vision_transformer import DinoVisionTransformer


_RESNET_MEAN = [0.485, 0.456, 0.406]
_RESNET_STD = [0.229, 0.224, 0.225]


def build_dinov3_vits14(**overrides: object) -> DinoVisionTransformer:
    """Construct a ViT-S/14 backbone that mimics the DINOv3 setting.

    Args:
        **overrides: keyword arguments forwarded to ``DinoVisionTransformer``;
            any key given here replaces the matching ViT-S/14 default below
            (e.g. ``img_size``, ``patch_size``, ``num_register_tokens``).
            NOTE: the previous annotation ``Dict[str, float]`` was wrong —
            override values are ints, bools, strings, and callables.

    Returns:
        A ``DinoVisionTransformer`` whose mask token (if present) is frozen:
        the aggregator never masks patches, so excluding it from the
        optimizer avoids pointless gradient traffic.
    """
    defaults = dict(
        patch_size=14,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        ffn_layer="swiglufused",
        drop_path_rate=0.4,
        qk_norm=True,
        num_register_tokens=4,
        interpolate_antialias=True,
    )
    defaults.update(overrides)

    model = DinoVisionTransformer(**defaults)
    # The mask token is only used for masked-image-modeling pretraining.
    if hasattr(model, "mask_token"):
        model.mask_token.requires_grad_(False)
    return model


def _slice_expand_and_flatten(token_tensor: torch.Tensor, batch: int, seq: int) -> torch.Tensor:
    """Matches the camera/register token handling from the original VGGT aggregator."""

    first = token_tensor[:, 0:1, ...].expand(batch, 1, *token_tensor.shape[2:])
    rest = token_tensor[:, 1:, ...].expand(batch, seq - 1, *token_tensor.shape[2:])
    combined = torch.cat([first, rest], dim=1)
    return combined.view(batch * seq, *combined.shape[2:])


class LightAggregator(nn.Module):
    """Reduced VGGT aggregator with DINOv3 ViT-S patch encoder and 4+4 attention blocks.

    Pipeline: per-frame patch embedding -> prepend camera/register tokens ->
    alternating frame-local and global (cross-frame) attention blocks.  Each
    alternation step emits one fused tensor ``cat([frame, global], dim=-1)``.
    """

    def __init__(
        self,
        img_size: int = 518,
        patch_size: int = 14,
        embed_dim: int = 384,
        frame_depth: int = 4,
        global_depth: int = 4,
        num_heads: int = 6,
        mlp_ratio: float = 4.0,
        num_register_tokens: int = 4,
        qkv_bias: bool = True,
        proj_bias: bool = True,
        ffn_bias: bool = True,
        rope_freq: int = 100,
        init_values: float = 0.01,
        use_checkpoint: bool = True,
    ) -> None:
        """Build the aggregator.

        Args:
            img_size: input image side length fed to the patch encoder.
            patch_size: patch side length used both by the encoder and by the
                RoPE position grid in :meth:`forward`.
            embed_dim: token width shared by the encoder and attention blocks.
            frame_depth: number of frame-local attention blocks (> 0).
            global_depth: number of cross-frame attention blocks (> 0).
            num_heads, mlp_ratio, qkv_bias, proj_bias, ffn_bias, init_values:
                standard ``Block`` hyper-parameters.
            num_register_tokens: learned register tokens prepended per frame.
            rope_freq: rotary-embedding base frequency; <= 0 disables RoPE.
            use_checkpoint: gradient-checkpoint attention blocks in training.

        Raises:
            ValueError: if either depth is not positive.
        """
        super().__init__()

        if frame_depth <= 0 or global_depth <= 0:
            raise ValueError("frame_depth and global_depth must be positive")

        # Patch encoder swapped to DINOv3 ViT-S/14 by default.
        # BUGFIX: forward patch_size and embed_dim as well — previously only
        # img_size and the register count were passed, so any non-default
        # patch_size/embed_dim silently built an encoder inconsistent with
        # self.patch_size (wrong RoPE grid) and with the blocks below
        # (dimension mismatch at token concatenation).  Defaults unchanged.
        self.patch_embed = build_dinov3_vits14(
            img_size=img_size,
            patch_size=patch_size,
            embed_dim=embed_dim,
            num_register_tokens=num_register_tokens,
        )
        self.patch_size = patch_size

        # Frame and global alternating attention blocks share hyper-parameters.
        block_kwargs = dict(
            dim=embed_dim,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            proj_bias=proj_bias,
            ffn_bias=ffn_bias,
            init_values=init_values,
        )
        self.frame_blocks = nn.ModuleList(
            [Block(**block_kwargs) for _ in range(frame_depth)]
        )
        self.global_blocks = nn.ModuleList(
            [Block(**block_kwargs) for _ in range(global_depth)]
        )

        self.frame_depth = frame_depth
        self.global_depth = global_depth
        self.embed_dim = embed_dim
        self.use_checkpoint = use_checkpoint
        # Non-reentrant checkpointing (the variant PyTorch recommends).
        self.use_reentrant = False

        # Rotary positional embedding for patch tokens (None if disabled).
        self.rope = RotaryPositionEmbedding2D(frequency=rope_freq) if rope_freq > 0 else None
        self.position_getter = PositionGetter() if self.rope is not None else None

        # Camera/register tokens follow the original layout: slot 0 along
        # dim 1 is the first-frame variant, slot 1 serves all other frames.
        self.camera_token = nn.Parameter(torch.randn(1, 2, 1, embed_dim))
        self.register_token = nn.Parameter(torch.randn(1, 2, num_register_tokens, embed_dim))
        nn.init.normal_(self.camera_token, std=1e-6)
        nn.init.normal_(self.register_token, std=1e-6)

        # Patch tokens start after 1 camera token + the register tokens.
        self.patch_start_idx = 1 + num_register_tokens

        # ImageNet normalization constants, broadcastable over [B, S, 3, H, W].
        mean = torch.tensor(_RESNET_MEAN).view(1, 1, 3, 1, 1)
        std = torch.tensor(_RESNET_STD).view(1, 1, 3, 1, 1)
        self.register_buffer("_resnet_mean", mean, persistent=False)
        self.register_buffer("_resnet_std", std, persistent=False)

        # Intermediates stashed by forward(return_detailed=True).  The value
        # type is a mix of lists of tensors and plain tensors, so the previous
        # Dict[str, List[torch.Tensor]] annotation was inaccurate.
        self._last_detailed: Optional[Dict[str, object]] = None

    def forward(
        self,
        images: torch.Tensor,
        return_detailed: bool = False,
    ) -> Tuple[List[torch.Tensor], int]:
        """Aggregate a batch of image sequences.

        Args:
            images: RGB tensor of shape ``[B, S, 3, H, W]``; ImageNet
                mean/std normalization is applied internally (inputs are
                presumably in [0, 1] — confirm against the caller).
            return_detailed: if True, stash intermediate tensors, retrievable
                via :meth:`get_last_detailed_outputs`.

        Returns:
            ``(outputs, patch_start_idx)`` — ``outputs`` holds one
            ``[B, S, tokens, 2 * embed_dim]`` tensor per alternation step, and
            ``patch_start_idx`` is the index of the first patch token along
            the token dimension.

        Raises:
            ValueError: if ``images`` is not 5-D or not 3-channel.
        """
        if images.dim() != 5:
            raise ValueError("Expected image tensor of shape [B, S, 3, H, W]")

        batch, seq, channels, height, width = images.shape
        if channels != 3:
            raise ValueError("Expected RGB images")

        # Normalize and embed every frame independently ([B*S, 3, H, W]).
        normed = (images - self._resnet_mean) / self._resnet_std
        patch_tokens = self.patch_embed(normed.view(batch * seq, channels, height, width))
        if isinstance(patch_tokens, dict):
            patch_tokens = patch_tokens["x_norm_patchtokens"]

        _, num_patches, dim = patch_tokens.shape

        camera_token = _slice_expand_and_flatten(self.camera_token, batch, seq)
        register_token = _slice_expand_and_flatten(self.register_token, batch, seq)

        tokens = torch.cat([camera_token, register_token, patch_tokens], dim=1)

        pos = None
        if self.rope is not None:
            pos = self.position_getter(batch * seq, height // self.patch_size, width // self.patch_size, device=images.device)
            # Shift patch positions to start at 1; camera/register tokens all
            # share position (0, 0).
            pos = pos + 1
            pos_special = torch.zeros(batch * seq, self.patch_start_idx, 2, device=images.device, dtype=pos.dtype)
            pos = torch.cat([pos_special, pos], dim=1)

        outputs: List[torch.Tensor] = []
        frame_tokens_history: List[torch.Tensor] = []
        global_tokens_history: List[torch.Tensor] = []

        tokens_bs = tokens
        frame_idx = 0
        global_idx = 0

        # Alternate frame-local and global attention; the shallower branch
        # simply stops contributing once its blocks are exhausted.
        iterations = max(self.frame_depth, self.global_depth)

        for iter_idx in range(iterations):
            if frame_idx < self.frame_depth:
                tokens_bs, frame_feat = self._run_frame_block(tokens_bs, batch, seq, num_patches + self.patch_start_idx, dim, frame_idx, pos)
                frame_tokens_history.append(frame_feat)
                frame_idx += 1
            if global_idx < self.global_depth:
                tokens_bs, global_feat = self._run_global_block(tokens_bs, batch, seq, num_patches + self.patch_start_idx, dim, global_idx, pos)
                global_tokens_history.append(global_feat)
                global_idx += 1

            # Align the latest frame/global tensors for aggregation; fall back to last seen
            frame_tensor = frame_tokens_history[-1]
            global_tensor = global_tokens_history[-1] if global_tokens_history else frame_tensor
            outputs.append(torch.cat([frame_tensor, global_tensor], dim=-1))

        if return_detailed:
            self._last_detailed = {
                "tokens": outputs,
                "frame_tokens": frame_tokens_history,
                "global_tokens": global_tokens_history,
                "patch_tokens": patch_tokens.view(batch, seq, num_patches, dim),
                "camera_tokens": camera_token.view(batch, seq, 1, dim),
                "register_tokens": register_token.view(batch, seq, -1, dim),
            }
        else:
            self._last_detailed = None

        return outputs, self.patch_start_idx

    def _run_frame_block(
        self,
        tokens: torch.Tensor,
        batch: int,
        seq: int,
        token_count: int,
        dim: int,
        idx: int,
        pos: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run frame-local attention block ``idx`` on tokens laid out as
        ``[B*S, token_count, dim]`` (each frame attends only within itself).

        Returns the updated ``[B*S, token_count, dim]`` tokens and a
        ``[B, S, token_count, dim]`` view for the output history.
        """
        if tokens.shape != (batch * seq, token_count, dim):
            tokens = tokens.view(batch, seq, token_count, dim).view(batch * seq, token_count, dim)
        if pos is not None and pos.shape != (batch * seq, token_count, 2):
            pos = pos.view(batch, seq, token_count, 2).view(batch * seq, token_count, 2)

        block = self.frame_blocks[idx]
        # Checkpoint only in training: saves activation memory at the cost of
        # recomputation; at eval time it would be pure overhead.
        if self.training and self.use_checkpoint:
            tokens = checkpoint(block, tokens, pos, use_reentrant=self.use_reentrant)
        else:
            tokens = block(tokens, pos=pos)
        return tokens, tokens.view(batch, seq, token_count, dim)

    def _run_global_block(
        self,
        tokens: torch.Tensor,
        batch: int,
        seq: int,
        token_count: int,
        dim: int,
        idx: int,
        pos: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run global attention block ``idx`` with all frames merged into one
        sequence (``[B, S*token_count, dim]``) so tokens attend across frames.

        Returns the updated ``[B*S, token_count, dim]`` tokens and a
        ``[B, S, token_count, dim]`` view for the output history.
        """
        tokens = tokens.view(batch, seq * token_count, dim)
        # Positions repeat per frame: tokens at the same spatial location in
        # different frames share a RoPE position, as in the original VGGT.
        if pos is not None and pos.shape != (batch, seq * token_count, 2):
            pos = pos.view(batch, seq * token_count, 2)

        block = self.global_blocks[idx]
        if self.training and self.use_checkpoint:
            tokens = checkpoint(block, tokens, pos, use_reentrant=self.use_reentrant)
        else:
            tokens = block(tokens, pos=pos)

        tokens = tokens.view(batch, seq, token_count, dim)
        return tokens.view(batch * seq, token_count, dim), tokens

    def get_last_detailed_outputs(self) -> Optional[Dict[str, object]]:
        """Return the intermediates stashed by the last
        ``forward(return_detailed=True)`` call, or None otherwise."""
        return self._last_detailed
