"""Distilled VGGT student model for real-time inference."""

from __future__ import annotations

from dataclasses import dataclass, replace
from importlib import import_module
from typing import Any, Dict, Optional

import torch
import torch.nn as nn

from vggt.heads.camera_head import CameraHead
from vggt.heads.dpt_head import DPTHead
from vggt.heads.track_head import TrackHead


@dataclass
class DistilledConfig:
    """Convenience container for the distilled VGGT hyper-parameters."""

    # Input resolution and ViT patch size (518 / 14 — presumably matching the
    # teacher's DINOv2-style preprocessing; TODO confirm against the teacher).
    img_size: int = 518
    patch_size: int = 14
    # Backbone capacity: embedding width, number of frame-attention layers,
    # number of global-attention layers, and attention heads. The student is
    # much shallower than the 24-layer teacher mentioned in the class docs.
    embed_dim: int = 384
    frame_depth: int = 4
    global_depth: int = 4
    num_heads: int = 6
    # Extra learnable register tokens prepended by the aggregator.
    num_register_tokens: int = 4
    # MLP hidden size as a multiple of embed_dim.
    mlp_ratio: float = 4.0
    # Per-head toggles: a disabled head is stored as None on the model and
    # skipped entirely in forward(), so inference only pays for enabled outputs.
    enable_camera: bool = True
    enable_depth: bool = True
    enable_point: bool = False
    enable_track: bool = False


class DistilledVGGT(nn.Module):
    """Student VGGT variant distilled from the 24-layer teacher.

    A lightweight aggregator backbone feeds the standard VGGT task heads
    (camera pose, depth, world points, point tracking). Heads can be toggled
    individually via :class:`DistilledConfig` so deployed models only pay
    for the outputs they need.
    """

    def __init__(self, cfg: Optional[DistilledConfig] = None, **overrides: Any) -> None:
        """Build the student model.

        Args:
            cfg: Base hyper-parameters; defaults to ``DistilledConfig()``.
                The caller's object is never mutated.
            **overrides: Individual config fields to override.

        Raises:
            AttributeError: If an override key is not a config field.
        """
        super().__init__()
        cfg = self._merge_config(cfg, overrides)
        # Imported lazily by dotted path — presumably to break a circular
        # import with the aggregator module; TODO confirm before inlining.
        aggregator_module = import_module("Distill.vggt_distilled.aggregator")
        LightAggregator = aggregator_module.LightAggregator

        self.aggregator = LightAggregator(
            img_size=cfg.img_size,
            patch_size=cfg.patch_size,
            embed_dim=cfg.embed_dim,
            frame_depth=cfg.frame_depth,
            global_depth=cfg.global_depth,
            num_heads=cfg.num_heads,
            mlp_ratio=cfg.mlp_ratio,
            num_register_tokens=cfg.num_register_tokens,
        )

        # Heads consume frame + global tokens concatenated along the channel
        # dimension, hence twice the backbone embedding width.
        token_dim = cfg.embed_dim * 2
        patch_size = cfg.patch_size

        self.camera_head = CameraHead(dim_in=token_dim) if cfg.enable_camera else None

        # Every aggregator layer is tapped as an intermediate DPT feature
        # (previously this list was duplicated for the depth and point heads).
        all_layer_indices = list(range(cfg.frame_depth + cfg.global_depth))

        depth_head_cfg: Dict[str, Any] = dict(
            dim_in=token_dim,
            patch_size=patch_size,
            output_dim=2,  # depth + confidence channels
            activation="exp",
            conf_activation="expp1",
            intermediate_layer_idx=all_layer_indices,
        )
        self.depth_head = DPTHead(**depth_head_cfg) if cfg.enable_depth else None

        point_head_cfg: Dict[str, Any] = dict(
            dim_in=token_dim,
            patch_size=patch_size,
            output_dim=4,  # XYZ + confidence channels
            intermediate_layer_idx=all_layer_indices,
        )
        self.point_head = DPTHead(**point_head_cfg) if cfg.enable_point else None

        if cfg.enable_track:
            track_cfg: Dict[str, Any] = dict(dim_in=token_dim, patch_size=patch_size)
            self.track_head = TrackHead(**track_cfg)
        else:
            self.track_head = None

    def forward(
        self,
        images: torch.Tensor,
        query_points: Optional[torch.Tensor] = None,
        return_intermediate: bool = False,
    ) -> Dict[str, Any]:
        """Run all enabled heads on a batch of image sequences.

        Args:
            images: ``[B, S, 3, H, W]`` tensor; a 4-D ``[S, 3, H, W]`` tensor
                is promoted to a batch of one.
            query_points: Optional ``[B, N, 2]`` (or unbatched ``[N, 2]``)
                pixel coordinates for the tracking head.
            return_intermediate: When True, the aggregator's detailed outputs
                are merged into the returned dict.

        Returns:
            Dict with (depending on enabled heads): ``pose_enc`` /
            ``pose_enc_list``, ``depth`` / ``depth_conf``, ``world_points`` /
            ``world_points_conf``, ``track`` / ``vis`` / ``conf``, plus the
            (possibly batch-promoted) ``images`` in eval mode.

        Raises:
            ValueError: If ``images`` is neither 4-D nor 5-D.
        """
        # Promote a single unbatched sequence to batch size one.
        if images.dim() == 4:
            images = images.unsqueeze(0)
        if images.dim() != 5:
            raise ValueError("Expected images tensor of shape [B, S, 3, H, W]")

        # Likewise promote unbatched query points [N, 2] -> [1, N, 2].
        if query_points is not None and query_points.dim() == 2:
            query_points = query_points.unsqueeze(0)

        tokens_list, patch_start_idx = self.aggregator(images, return_detailed=return_intermediate)
        detailed = self.aggregator.get_last_detailed_outputs() if return_intermediate else None
        # Prefer the detailed per-layer tokens when available; otherwise the
        # plain aggregator output feeds the heads.
        aggregated_tokens = detailed["tokens"] if detailed is not None else tokens_list

        outputs: Dict[str, Any] = {}

        # Force fp32 in the camera/depth/point heads even if the caller runs
        # the backbone under autocast — presumably for numerical stability.
        # (torch.amp.autocast supersedes the deprecated torch.cuda.amp.autocast.)
        with torch.amp.autocast(device_type="cuda", enabled=False):
            if self.camera_head is not None:
                pose_enc_list = self.camera_head(aggregated_tokens)
                outputs["pose_enc"] = pose_enc_list[-1]  # final iteration is the prediction
                outputs["pose_enc_list"] = pose_enc_list
            if self.depth_head is not None:
                depth, depth_conf = self.depth_head(
                    aggregated_tokens, images=images, patch_start_idx=patch_start_idx
                )
                outputs["depth"] = depth
                outputs["depth_conf"] = depth_conf
            if self.point_head is not None:
                world_points, world_conf = self.point_head(
                    aggregated_tokens, images=images, patch_start_idx=patch_start_idx
                )
                outputs["world_points"] = world_points
                outputs["world_points_conf"] = world_conf

        # Tracking runs only when queries are supplied, even if the head exists.
        if self.track_head is not None and query_points is not None:
            track_list, vis, conf = self.track_head(
                aggregated_tokens, images=images, patch_start_idx=patch_start_idx, query_points=query_points
            )
            outputs["track"] = track_list[-1]  # final refinement iteration
            outputs["vis"] = vis
            outputs["conf"] = conf

        # Echo the inputs back at inference time (handy for visualization).
        if not self.training:
            outputs["images"] = images

        if return_intermediate and detailed is not None:
            outputs.update(detailed)

        return outputs

    @staticmethod
    def _merge_config(cfg: Optional[DistilledConfig], overrides: Dict[str, Any]) -> DistilledConfig:
        """Return a config with *overrides* applied on top of *cfg*.

        Always works on a copy: the previous implementation wrote overrides
        straight into the caller's ``cfg``, silently corrupting a config
        object shared across several model instantiations.

        Raises:
            AttributeError: For an override key that is not a config field.
        """
        base = DistilledConfig() if cfg is None else replace(cfg)
        for key, value in overrides.items():
            if not hasattr(base, key):
                raise AttributeError(f"Unknown config key '{key}'")
            setattr(base, key, value)
        return base
