# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""MiniVGGT: A lightweight distilled version of VGGT.

This module directly reuses the Aggregator from VGGT with smaller parameters:
- Uses DINOv2-small (embed_dim=384) instead of DINOv2-large (embed_dim=1024)
- Reduces depth from 24 to 4 layers
- Maintains compatibility with pretrained VGGT prediction heads by outputting
  concatenated frame+global features (2*embed_dim dimension)
"""

from __future__ import annotations

import sys
from pathlib import Path
from dataclasses import dataclass, field
from typing import List, Tuple

# Add parent directory to path to import VGGT modules
# Repository root: two directory levels above this file.
REPO_ROOT = Path(__file__).resolve().parents[2]
if str(REPO_ROOT) not in sys.path:
    # Prepend (insert at index 0) so the in-repo `vggt` package is found
    # before any installed copy; guarded to avoid duplicate entries.
    sys.path.insert(0, str(REPO_ROOT))

from vggt.models.aggregator import Aggregator


@dataclass
class MiniVGGTConfig:
    """Configuration for MiniVGGT architecture.

    Attributes:
        img_size: Input image size in pixels
        patch_size: Size of image patches
        embed_dim: Embedding dimension (384 for ViT-small)
        depth: Number of transformer blocks (4 for distillation)
        num_heads: Number of attention heads
        mlp_ratio: MLP hidden dim ratio
        num_register_tokens: Number of register tokens
        qkv_bias: Whether the QKV projection uses a bias term
        proj_bias: Whether the attention output projection uses a bias term
        ffn_bias: Whether the feed-forward layers use a bias term
        patch_embed: Patch embedding type
        aa_order: Alternating attention order
        aa_block_size: Attention block grouping size
        qk_norm: Whether to normalize Q/K
        rope_freq: RoPE frequency (-1 to disable)
        init_values: Layer scale init value
    """
    img_size: int = 518
    patch_size: int = 14
    embed_dim: int = 384
    depth: int = 4
    num_heads: int = 6
    mlp_ratio: float = 4.0
    num_register_tokens: int = 4
    qkv_bias: bool = True
    proj_bias: bool = True
    ffn_bias: bool = True
    patch_embed: str = "dinov2_vits14_reg"
    # default_factory so each instance gets its own list (mutable default).
    aa_order: List[str] = field(default_factory=lambda: ["frame", "global"])
    aa_block_size: int = 1
    qk_norm: bool = True
    rope_freq: int = 100
    init_values: float = 0.01


class MiniVGGT(Aggregator):
    """Lightweight VGGT with alternating attention (4 layers, ViT-small backbone).

    This model directly inherits from Aggregator and uses the same architecture
    with smaller parameters for knowledge distillation.

    Args:
        config: MiniVGGTConfig instance (defaults to MiniVGGT settings)
    """

    def __init__(self, config: MiniVGGTConfig | None = None):
        """Initialize MiniVGGT by calling Aggregator with distillation parameters.

        Args:
            config: Optional configuration; when omitted, the default
                distillation settings (ViT-small backbone, depth 4) are used.
        """
        # Resolve the config first, then initialize the parent before touching
        # `self`, so no attribute is set on the instance prior to parent init.
        cfg = config or MiniVGGTConfig()
        super().__init__(
            img_size=cfg.img_size,
            patch_size=cfg.patch_size,
            embed_dim=cfg.embed_dim,
            depth=cfg.depth,
            num_heads=cfg.num_heads,
            mlp_ratio=cfg.mlp_ratio,
            num_register_tokens=cfg.num_register_tokens,
            qkv_bias=cfg.qkv_bias,
            proj_bias=cfg.proj_bias,
            ffn_bias=cfg.ffn_bias,
            patch_embed=cfg.patch_embed,
            aa_order=cfg.aa_order,
            aa_block_size=cfg.aa_block_size,
            qk_norm=cfg.qk_norm,
            rope_freq=cfg.rope_freq,
            init_values=cfg.init_values,
        )
        # Kept for checkpointing/introspection by callers.
        self.config = cfg

    def forward_features(self, images):
        """Alias for forward() to maintain compatibility with training script.

        Args:
            images: [B, S, 3, H, W] or [S, 3, H, W], range [0, 1]

        Returns:
            (aggregated_tokens_list, patch_start_idx):
                - aggregated_tokens_list: List of 4 concat tensors [B, S, P, 2*C]
                - patch_start_idx: Index offset for patch tokens
        """
        return super().forward(images)
