# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from functools import partial
import gc
import logging
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Union, List, Dict, Any

from fast3r.models.fast3r import Fast3RDecoder
from fast3r.croco.models.pos_embed import RoPE2D, get_1d_sincos_pos_embed_from_grid, get_2d_sincos_pos_embed
# from fast3r.croco.models.blocks import Block, PositionGetter
# from vggt_model.layers import PatchEmbed
from vggt_model.layers.block import Block
# from vggt_model.layers.rope import RotaryPositionEmbedding2D, PositionGetter
# from vggt_model.layers.vision_transformer import vit_small, vit_base, vit_large, vit_giant2

logger = logging.getLogger(__name__)

_RESNET_MEAN = [0.485, 0.456, 0.406]
_RESNET_STD = [0.229, 0.224, 0.225]


class VggtDecoder(nn.Module):
    """
    Alternating-attention decoder over multi-view image tokens, as described in
    VGGT: Visual Geometry Grounded Transformer.

    The decoder interleaves "frame" attention (tokens attend within a single
    view) and "global" attention (tokens attend across all views), in the
    order given by ``aa_order``.

    Args:
        random_image_idx_embedding (bool): If True, assign each view a random
            non-repeating image-index positional embedding per forward pass
            (view 0 always gets index 0); otherwise use sequential indices
            0..S-1.
        img_size (int): Image size in pixels (kept for interface
            compatibility; not read by this module).
        enc_embed_dim (int): Dimension of the encoder token embeddings. A
            linear projection to ``embed_dim`` is added when it differs.
        pos_embed (str): Positional-embedding scheme: "RoPE<freq>" (e.g.
            "RoPE100"), "Token" (learned), "cosine" (fixed 2D sin-cos), or
            "" for none.
        embed_dim (int): Dimension of the decoder token embeddings.
        depth (int): Number of frame blocks (and, equally, global blocks).
        num_heads (int): Number of attention heads.
        mlp_ratio (float): Ratio of MLP hidden dim to embedding dim.
        drop (float): Dropout rate (currently unused).
        num_register_tokens (int): Number of register tokens per view.
        block_fn (nn.Module): The block type used for attention (Block by default).
        qkv_bias (bool): Whether to include bias in QKV projections.
        proj_bias (bool): Whether to include bias in the output projection.
        ffn_bias (bool): Whether to include bias in MLP layers.
        aa_order (list[str]): The order of alternating attention, e.g.
            ["frame", "global"]. Both types must appear: each round
            channel-concatenates the frame and global intermediates.
        aa_block_size (int): How many blocks to group under each attention
            type before switching. Must divide ``depth``.
        qk_norm (bool): Whether to apply QK normalization.
        init_values (float): Init scale for layer scale.
        has_img_pos (bool): Whether to add per-view (image-index) positional
            embeddings to the tokens.
        attn_drop (float): Attention dropout rate (currently unused).
        attn_implementation (str): Attention backend name (currently unused).
        attn_bias_for_inference_enabled (bool): Currently unused.
        norm_layer: Normalization layer factory (currently unused).
        token_offset (int): Currently unused.
        vggt_camera (bool): If True, use separate camera/register tokens for
            the first frame vs. the remaining frames (as in VGGT).
    """

    # Class-level cache shared across instances. Appears unused in this file;
    # presumably consulted by Fast3RDecoder.interpolate_pos_encoding when
    # cache=True is passed — TODO(review): confirm against that helper.
    sincos_pos_cache: Dict[Tuple, torch.Tensor] = {}

    def __init__(
        self,
        random_image_idx_embedding: bool,
        img_size=518,
        enc_embed_dim: int = 768,
        pos_embed="",
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4.0,
        drop: float = 0.0,
        num_register_tokens=4,
        block_fn=Block,
        qkv_bias=True,
        proj_bias=True,
        ffn_bias=True,
        aa_order=["frame", "global"],
        aa_block_size=1,
        qk_norm=True,
        init_values=0.01,
        has_img_pos=True,
        attn_drop: float = 0.0,
        attn_implementation: str = "pytorch_naive",
        attn_bias_for_inference_enabled=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        token_offset=2,
        vggt_camera=True,
    ):
        super().__init__()

        # Project encoder features into the decoder width when they differ.
        if enc_embed_dim != embed_dim:
            self.decoder_embed = nn.Linear(enc_embed_dim, embed_dim, bias=True)
        else:
            self.decoder_embed = None

        # Rotary position embedding, e.g. pos_embed="RoPE100" -> freq=100.
        if pos_embed.startswith("RoPE"):
            if RoPE2D is None:
                raise ImportError(
                    "Cannot find cuRoPE2D, please install it following the README instructions"
                )
            freq = float(pos_embed[len("RoPE") :])
            self.rope = RoPE2D(freq=freq)
        else:
            self.rope = None
            print("No positional embedding for decoder")

        # Learned absolute positional embedding. 1370 tokens presumably
        # corresponds to a 37x37 patch grid (1369) plus one cls token —
        # TODO(review): confirm against the encoder's patch count.
        if pos_embed.startswith("Token"):
            self.pos_embed = nn.Parameter(torch.zeros(1, 1370, embed_dim))
            from torch.nn.init import trunc_normal_
            trunc_normal_(self.pos_embed, std=0.02)
        else:
            self.pos_embed = None

        # Fixed 2D sin-cos positional embedding over a 37x37 grid plus one
        # cls token; registered as a non-persistent buffer (not checkpointed).
        if pos_embed.startswith("cosine"):
            dec_pos_embed = get_2d_sincos_pos_embed(embed_dim, int(1369**0.5), n_cls_token=1)
            self.register_buffer(
                "cos_pos_embed",
                torch.from_numpy(dec_pos_embed).unsqueeze(0).float(),
                persistent=False,
            )
        else:
            self.cos_pos_embed = None

        self.frame_blocks = nn.ModuleList(
            [
                block_fn(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    proj_bias=proj_bias,
                    ffn_bias=ffn_bias,
                    init_values=init_values,
                    qk_norm=qk_norm,
                    rope=self.rope,
                )
                for _ in range(depth)
            ]
        )

        self.global_blocks = nn.ModuleList(
            [
                block_fn(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    proj_bias=proj_bias,
                    ffn_bias=ffn_bias,
                    init_values=init_values,
                    qk_norm=qk_norm,
                    rope=self.rope,
                )
                for _ in range(depth)
            ]
        )

        self.depth = depth
        self.aa_order = aa_order
        self.aa_block_size = aa_block_size
        self.vggt_camera = vggt_camera

        # Validate that depth is divisible by aa_block_size
        if self.depth % self.aa_block_size != 0:
            raise ValueError(f"depth ({depth}) must be divisible by aa_block_size ({aa_block_size})")

        self.aa_block_num = self.depth // self.aa_block_size

        if vggt_camera:
            # Two camera tokens: one for the first frame, one shared by the
            # rest. The same split applies to the register tokens.
            self.camera_token = nn.Parameter(torch.randn(1, 2, 1, embed_dim) * 1e-2)
            self.register_token = nn.Parameter(torch.randn(1, 2, num_register_tokens, embed_dim) * 1e-2)
        else:
            # A single camera/register token set shared by all frames.
            self.camera_token = nn.Parameter(torch.randn(1, 1, 1, embed_dim) * 1e-2)
            self.register_token = nn.Parameter(torch.randn(1, 1, num_register_tokens, embed_dim) * 1e-2)

        # Patch tokens start after the camera token and the register tokens.
        self.patch_start_idx = 1 + num_register_tokens

        self.has_img_pos = has_img_pos
        if has_img_pos:
            # 1D sin-cos embeddings for image (view) indices; supports up to
            # 10 distinct views. Non-persistent: recomputed, not checkpointed.
            self.random_image_idx_embedding = random_image_idx_embedding
            self.register_buffer(
                "image_idx_emb",
                torch.from_numpy(
                    get_1d_sincos_pos_embed_from_grid(embed_dim, np.arange(10))
                ).float(),
                persistent=False,
            )

        # Register ImageNet normalization constants as non-persistent buffers.
        for name, value in (
            ("_resnet_mean", _RESNET_MEAN),
            ("_resnet_std", _RESNET_STD),
        ):
            self.register_buffer(
                name,
                torch.FloatTensor(value).view(1, 1, 3, 1, 1),
                persistent=False,
            )

    def forward(
        self,
        encoded_feats, positions, image_ids
    ) -> List[torch.Tensor]:
        """
        Run alternating frame/global attention over the encoded views.

        Args:
            encoded_feats (list of tensors): Encoded features for each view,
                each of shape B x Npatches x D.
            positions (list of tensors): (row, col) patch positions for each
                view, each of shape B x Npatches x 2.
            image_ids: Unused; view indices are derived internally. Kept for
                interface compatibility.

        Returns:
            list[torch.Tensor]: One output per frame/global round, each the
                channel-wise concatenation of the frame and global
                intermediates, shape B x (S*P) x 2C. Within each view's P
                tokens, patch tokens begin at ``self.patch_start_idx``.
        """
        B, P, C = encoded_feats[0].shape
        S = len(encoded_feats)

        # Stack views along a new dimension: tokens B x S x P x C,
        # pos B x S x P x 2. Assumes all views have the same patch count.
        tokens = torch.stack(encoded_feats, dim=1)
        pos = torch.stack(positions, dim=1)

        # Prepend camera and register tokens to every view.
        camera_token = slice_expand_and_flatten(self.camera_token, B, S, vggt=self.vggt_camera)
        register_token = slice_expand_and_flatten(self.register_token, B, S, vggt=self.vggt_camera)
        tokens = torch.cat([camera_token, register_token, tokens], dim=2)
        B, S, P, C = tokens.shape  # P now includes the special tokens

        if self.patch_start_idx > 0:
            # Special tokens (camera/register) get position 0; patch
            # positions are shifted by +1 so 0 stays reserved for them.
            pos_special = torch.zeros(B, S, self.patch_start_idx, 2).to(tokens.device).to(pos.dtype)
            pos = torch.cat([pos_special, pos + 1], dim=2)

        if self.decoder_embed is not None:
            tokens = self.decoder_embed(tokens)

        # Recover the patch-grid height/width from the max row/col indices
        # of the first view (positions are 0-based, hence the +1).
        h = torch.max(positions[0][0, :, 0]).item()
        w = torch.max(positions[0][0, :, 1]).item()
        h += 1
        w += 1

        if self.pos_embed is not None:
            pos_encoding = Fast3RDecoder.interpolate_pos_encoding(
                tokens, w, h, self.pos_embed.float(),
                n_views=1, token_offset=self.patch_start_idx, n_cls_token=1,
            )
            tokens += pos_encoding

        if self.cos_pos_embed is not None:
            pos_encoding = Fast3RDecoder.interpolate_pos_encoding(
                tokens, w, h, self.cos_pos_embed,
                n_views=1, token_offset=self.patch_start_idx, n_cls_token=1, cache=True,
            )
            tokens += pos_encoding

        if self.has_img_pos:
            # Add a per-view (image-index) embedding so blocks can tell
            # views apart.
            if self.random_image_idx_embedding:
                # Random non-repeating indices per sample (view 0 -> index 0).
                image_pos = self._get_random_image_pos(
                    num_patches=P,
                    batch_size=B,
                    num_views=S,
                    max_image_idx=self.image_idx_emb.shape[0] - 1,
                    device=tokens.device,
                )
            else:
                # Sequential view indices 0..S-1, broadcast over batch and
                # patches: image_pos has shape 1 x S x 1 x C.
                image_ids = torch.arange(0, S, dtype=torch.int).reshape(1, -1, 1).to(encoded_feats[0].device)
                image_pos = self.image_idx_emb[image_ids]

            # Broadcast add over batch and/or patch dimensions.
            tokens += image_pos

        tokens = tokens.reshape(B, S, P, C)
        pos = pos.reshape(B, S, P, 2)

        frame_idx = 0
        global_idx = 0
        output_list = []

        # Both "frame" and "global" must occur in aa_order: each round below
        # concatenates the two intermediate lists channel-wise.
        for _ in range(self.aa_block_num):
            for attn_type in self.aa_order:
                if attn_type == "frame":
                    tokens, frame_idx, frame_intermediates = self._process_frame_attention(
                        tokens, B, S, P, C, frame_idx, pos=pos
                    )
                elif attn_type == "global":
                    tokens, global_idx, global_intermediates = self._process_global_attention(
                        tokens, B, S, P, C, global_idx, pos=pos
                    )
                else:
                    raise ValueError(f"Unknown attention type: {attn_type}")

            for i in range(len(frame_intermediates)):
                # Concat frame and global intermediates: B x (S*P) x 2C.
                concat_inter = torch.cat([frame_intermediates[i], global_intermediates[i]], dim=-1)
                output_list.append(concat_inter)

        # Drop the last intermediates eagerly to reduce peak memory.
        del concat_inter
        del frame_intermediates
        del global_intermediates
        gc.collect()
        torch.cuda.empty_cache()
        return output_list

    def _process_frame_attention(self, tokens, B, S, P, C, frame_idx, pos=None):
        """
        Run ``aa_block_size`` frame-attention blocks, starting at ``frame_idx``.

        Tokens are processed in shape (B*S, P, C) so attention stays within a
        single view. Returns the updated tokens, the advanced block index, and
        the per-block intermediates, each reshaped to (B, S*P, C).
        """
        # If needed, reshape tokens or positions:
        if tokens.shape != (B * S, P, C):
            tokens = tokens.view(B, S, P, C).view(B * S, P, C)

        if pos is not None and pos.shape != (B * S, P, 2):
            pos = pos.view(B, S, P, 2).view(B * S, P, 2)

        intermediates = []

        # by default, self.aa_block_size=1, which processes one block at a time
        for _ in range(self.aa_block_size):
            tokens = self.frame_blocks[frame_idx](tokens, pos=pos)
            frame_idx += 1
            intermediates.append(tokens.view(B, S * P, C))

        return tokens, frame_idx, intermediates

    def _process_global_attention(self, tokens, B, S, P, C, global_idx, pos=None):
        """
        Run ``aa_block_size`` global-attention blocks, starting at ``global_idx``.

        Tokens are processed in shape (B, S*P, C) so attention spans all views.
        Returns the updated tokens, the advanced block index, and the per-block
        intermediates, each of shape (B, S*P, C).
        """
        if tokens.shape != (B, S * P, C):
            tokens = tokens.view(B, S, P, C).view(B, S * P, C)

        if pos is not None and pos.shape != (B, S * P, 2):
            pos = pos.view(B, S, P, 2).view(B, S * P, 2)

        intermediates = []

        # by default, self.aa_block_size=1, which processes one block at a time
        for _ in range(self.aa_block_size):
            tokens = self.global_blocks[global_idx](tokens, pos=pos)
            global_idx += 1
            intermediates.append(tokens.view(B, S * P, C))

        return tokens, global_idx, intermediates

    def _generate_per_rank_generator(self):
        """
        Build a torch.Generator seeded per rank and per forward pass.

        The randperm drawn from it differs across ranks but is deterministic
        given a fixed number of forward passes (the seed comes from the global
        torch RNG, so resuming only needs that RNG's state restored).
        """
        # generate a per-rank random seed
        per_forward_pass_seed = torch.randint(0, 2 ** 32, (1,)).item()
        world_rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
        per_rank_seed = per_forward_pass_seed + world_rank

        # Set the seed for the random generator
        per_rank_generator = torch.Generator()
        per_rank_generator.manual_seed(per_rank_seed)
        return per_rank_generator

    def _get_random_image_pos(self, num_patches, batch_size, num_views, max_image_idx, device):
        """
        Generate non-repeating random image indices for each sample and look up
        the corresponding positional embeddings.

        View 0 always maps to index 0; the remaining views draw distinct
        indices from 1..max_image_idx (so num_views - 1 must not exceed
        max_image_idx).

        Args:
            num_patches (int): Number of patches per view (unused; the result
                broadcasts over the patch dimension). Kept for compatibility.
            batch_size (int): Number of samples in the batch.
            num_views (int): Number of views per sample.
            max_image_idx (int): Maximum image index for embedding.
            device (torch.device): Device to move data to.

        Returns:
            Tensor: Positional embeddings of shape (B, num_views, 1, D),
                broadcastable against tokens of shape (B, S, P, C).
        """
        # Generate random non-repeating image IDs (on CPU)
        image_ids = torch.zeros(batch_size, num_views, dtype=torch.long)

        # First view is always 0 for all samples
        image_ids[:, 0] = 0

        # Per-rank generator: random between forward passes, deterministic
        # across resumes given the global RNG state.
        per_rank_generator = self._generate_per_rank_generator()

        # Generate random non-repeating IDs for the remaining views
        for b in range(batch_size):
            random_ids = torch.randperm(max_image_idx, generator=per_rank_generator)[:num_views - 1] + 1
            image_ids[b, 1:] = random_ids

        # Move the image IDs to the correct device
        image_ids = image_ids.to(device)

        # Look up embeddings and add a singleton patch dim for broadcasting:
        # (B, num_views, 1, D).
        image_pos = self.image_idx_emb[image_ids].reshape(*image_ids.shape, 1, -1)

        return image_pos




def slice_expand_and_flatten(token_tensor, B, S, vggt=True):
    """
    Expand per-frame special tokens (camera/register) to a full batch of frames.

    When ``vggt`` is True, ``token_tensor`` has shape (1, 2, X, C):
      1) position 0 supplies the tokens for the first frame only,
      2) position 1 supplies the tokens for the remaining (S - 1) frames,
      3) both are expanded to batch size B and concatenated along the frame
         axis, yielding one first-position token set followed by (S - 1)
         second-position token sets per sequence.

    When ``vggt`` is False, ``token_tensor`` has shape (1, 1, X, C) and is
    simply broadcast to every batch element and frame.

    Note: despite the historical name, the result is NOT flattened to
    (B*S, X, C); callers consume the (B, S, X, C) layout directly.

    Args:
        token_tensor (torch.Tensor): Special tokens of shape (1, 2, X, C)
            when ``vggt`` is True, else (1, 1, X, C).
        B (int): Batch size.
        S (int): Number of frames (views); must be >= 1.
        vggt (bool): Whether to use the VGGT first-frame/other-frames split.

    Returns:
        torch.Tensor: Expanded tokens with shape (B, S, X, C) (a broadcast
            view; no data is copied except by the concatenation).
    """
    if vggt:
        # First-frame tokens: (1, 1, X, C) -> (B, 1, X, C)
        query = token_tensor[:, 0:1, ...].expand(B, 1, *token_tensor.shape[2:])
        # Remaining-frame tokens: (1, 1, X, C) -> (B, S-1, X, C)
        others = token_tensor[:, 1:, ...].expand(B, S - 1, *token_tensor.shape[2:])
        # Concatenate along the frame axis => (B, S, X, C).
        # (The original also called .view(B, S, ...) here, which was a no-op.)
        return torch.cat([query, others], dim=1)
    else:
        return token_tensor.expand(B, S, *token_tensor.shape[2:])