from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F

class LayerScale(nn.Module):
    """Per-channel learnable scaling (CaiT-style): multiplies the last
    dimension of the input by a trainable vector initialised to a small
    constant ``init_values``."""

    def __init__(
        self,
        dim: int,
        init_values: float = 1e-5,
    ) -> None:
        super().__init__()
        # One scale per channel, all starting at init_values.
        self.scale_factor = nn.Parameter(torch.full((dim,), init_values))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Scale ``x`` elementwise along its channel dimension."""
        return x * self.scale_factor


class Attntion(nn.Module):
    """Multi-head self-attention (no dropout, single output projection).

    NOTE(review): the class name keeps the original (misspelled) spelling
    because other code in this file instantiates it as ``Attntion``.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
    ):
        super().__init__()

        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5
        # Use F.scaled_dot_product_attention by default; set to False to run
        # the explicit (unfused) matmul/softmax fallback below.
        self.fused_attn = True

        self.qkv = nn.Linear(dim, dim * 3, bias=True)

        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply self-attention over a (B, N, C) token sequence.

        Returns a tensor of the same (B, N, C) shape.
        """
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, num_heads, N, head_dim)
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, self.head_dim)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)

        if self.fused_attn:
            # SDPA applies the 1/sqrt(head_dim) scaling internally, matching
            # self.scale in the fallback branch.
            x = F.scaled_dot_product_attention(q, k, v)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            attn = attn.softmax(dim=-1)
            x = attn @ v

        # (B, heads, N, head_dim) -> (B, N, C)
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        return x


class AttentionPoolLatent(nn.Module):
    """Latent-query attention pooling.

    A single learned latent token cross-attends to the input sequence,
    followed by a residual MLP; returns one pooled vector per batch element.
    """

    def __init__(
        self,
        in_features: int,
        num_heads: int = 8,
        mlp_ratio: float = 4.0,
    ):
        super().__init__()

        embed_dim = in_features
        assert embed_dim % num_heads == 0
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.scale = self.head_dim**-0.5

        # Placeholder, never populated in this implementation.
        self.pos_embed = None

        self.latent_dim = embed_dim
        self.latent_len = 1

        # Learned latent query token, broadcast across the batch in forward().
        self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim))

        self.q = nn.Linear(embed_dim, embed_dim, bias=True)
        self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=True)
        self.proj = nn.Linear(embed_dim, embed_dim, bias=True)

        self.mlp = MLP(embed_dim, int(embed_dim * mlp_ratio))
        self.norm = nn.LayerNorm(embed_dim, eps=1e-6)

    def forward(self, x):
        batch, seq_len, channels = x.shape

        # Queries come from the learned latent; keys/values from the input.
        latent = self.latent.expand(batch, -1, -1)
        q = self.q(latent)
        q = q.reshape(batch, self.latent_len, self.num_heads, self.head_dim)
        q = q.transpose(1, 2)

        kv = self.kv(x).reshape(batch, seq_len, 2, self.num_heads, self.head_dim)
        k, v = kv.permute(2, 0, 3, 1, 4).unbind(0)

        # Fused attention; SDPA handles the 1/sqrt(head_dim) scaling itself.
        pooled = F.scaled_dot_product_attention(q, k, v)
        pooled = pooled.transpose(1, 2).reshape(batch, self.latent_len, channels)
        pooled = self.proj(pooled)

        # Residual MLP refinement, then keep the (single) pooled token.
        pooled = pooled + self.mlp(self.norm(pooled))
        return pooled[:, 0]


class MLPLinear(nn.Linear):
    # NOTE(review): no behavior added over nn.Linear — presumably a marker
    # subclass so MLP projections can be targeted by isinstance-based rules
    # (weight init / quantization / checkpoint remapping); confirm with
    # downstream users before removing.
    pass


class MLP(nn.Module):
    """Two-layer feed-forward block: Linear -> GELU -> Linear."""

    def __init__(self, in_features, hidden_features):
        super().__init__()
        self.fc1 = MLPLinear(in_features, hidden_features, bias=True)
        self.act = nn.GELU()
        self.fc2 = MLPLinear(hidden_features, in_features, bias=True)

    def forward(self, x):
        hidden = self.act(self.fc1(x))
        return self.fc2(hidden)


class Block(nn.Module):
    """Pre-norm transformer encoder block:
    ``x = x + ls1(attn(norm1(x))); x = x + ls2(mlp(norm2(x)))``."""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        init_values: Optional[float] = None,
        eps: float = 1e-6,
    ):
        super().__init__()
        # LayerScale is enabled only when init_values is provided (non-zero);
        # otherwise the residual branches are left unscaled.
        use_ls = bool(init_values)
        self.ls1 = LayerScale(dim, init_values=init_values) if use_ls else nn.Identity()

        self.norm1 = nn.LayerNorm(dim, eps=eps)
        self.attn = Attntion(dim, num_heads)

        self.norm2 = nn.LayerNorm(dim, eps=eps)
        self.mlp = MLP(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
        )
        self.ls2 = LayerScale(dim, init_values=init_values) if use_ls else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.ls1(self.attn(self.norm1(x)))
        x = x + self.ls2(self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """Image-to-patch embedding.

    Splits the image into non-overlapping ``patch_size`` x ``patch_size``
    patches with a strided convolution and projects each patch to
    ``embed_dim`` channels.

    NOTE(review): ``img_size`` was annotated ``Optional[int]`` but ``None``
    would crash in the ``grid_size`` computation, so the annotation is
    tightened to ``int``. If ``img_size`` is not divisible by ``patch_size``
    the conv silently drops the right/bottom remainder; floor division keeps
    ``num_patches`` consistent with the conv's actual output size.
    """

    def __init__(
        self,
        img_size: int = 224,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        bias: bool = True,
    ):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Patches per spatial side; floor matches the conv output size.
        self.grid_size = self.img_size // self.patch_size
        self.num_patches = self.grid_size**2

        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias
        )

    def forward(self, x):
        """(B, C, H, W) -> (B, num_patches, embed_dim)."""
        x = self.proj(x)
        # (B, E, H', W') -> (B, H'*W', E)
        return x.flatten(2).transpose(1, 2)


class ViT(nn.Module):
    """Minimal Vision Transformer encoder.

    Pipeline: patch embedding -> cls/register tokens + positional embeddings
    (``_pos_embed``) -> optional pre-norm -> transformer blocks -> final
    norm -> pooling (latent attention pool or cls token).
    """

    def __init__(
        self,
        img_size: Union[int, Tuple[int, int]] = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        in_chans: int = 3,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        embed_dim: int = 768,
        reg_tokens: int = 0,
        class_token: bool = True,
        init_values: Optional[float] = None,
        depth: int = 24,
        global_pool_map: bool = False,
        eps: float = 1e-6,
        pre_norm: bool = False,
    ):
        super().__init__()

        self.depth = depth
        # When a pre-norm layer follows, the patch projection drops its bias.
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            bias=not pre_norm,
        )
        self.norm_pre = nn.LayerNorm(embed_dim, eps=eps) if pre_norm else nn.Identity()

        num_patches = self.patch_embed.num_patches

        self.cls_token = (
            nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
        )
        self.reg_token = (
            nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None
        )

        self.num_prefix_tokens = 1 if class_token else 0
        self.num_prefix_tokens += reg_tokens

        # pos_embed covers the class token only when prefix tokens are
        # concatenated *before* the add, i.e. when there are no register
        # tokens (see _pos_embed). With register tokens present, pos_embed is
        # added to the patch tokens alone and all prefix tokens are
        # concatenated afterwards.
        # BUGFIX: the original condition (`num_prefix_tokens == 1`) also
        # matched class_token=False / reg_tokens=1, sizing pos_embed one
        # token too long for that configuration and crashing in forward.
        embed_len = num_patches + (1 if (class_token and not reg_tokens) else 0)
        self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * 0.02)

        self.blocks = nn.Sequential(
            *(
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    init_values=init_values,
                    eps=eps,
                )
                for _ in range(depth)
            )
        )

        self.norm = nn.LayerNorm(embed_dim, eps=eps)

        if global_pool_map:
            self.attn_pool = AttentionPoolLatent(
                embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
            )
        else:
            self.attn_pool = None

    def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
        """Prepend cls/register tokens and add positional embeddings.

        Without register tokens the cls token is concatenated first so it
        receives its own positional embedding; with register tokens the
        positional embedding is added to the patch tokens only and all prefix
        tokens are concatenated afterwards.
        """
        to_cat = []
        if self.cls_token is not None:
            to_cat.append(self.cls_token.expand(x.shape[0], -1, -1))
        if self.reg_token is not None:
            to_cat.append(self.reg_token.expand(x.shape[0], -1, -1))

        if to_cat and self.reg_token is None:
            x = torch.cat(to_cat + [x], dim=1)
        x = x + self.pos_embed
        if to_cat and self.reg_token is not None:
            x = torch.cat(to_cat + [x], dim=1)
        return x

    def vla_forward(self, x: torch.Tensor, last_n_layer: int = 2) -> torch.Tensor:
        """Feature-extraction forward for VLA use.

        Runs blocks 0..(depth - last_n_layer) inclusive and returns the patch
        tokens only (prefix tokens stripped); no final norm or pooling.
        """
        output_layer_idx = self.depth - last_n_layer

        x = self.patch_embed(x)
        x = self._pos_embed(x)
        x = self.norm_pre(x)

        # Run only the selected prefix of blocks.
        for idx in range(output_layer_idx + 1):
            x = self.blocks[idx](x)

        # Drop cls/register tokens; keep patch tokens.
        return x[:, self.num_prefix_tokens :]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Full forward pass returning one pooled feature vector per image."""
        x = self.patch_embed(x)
        x = self._pos_embed(x)
        x = self.norm_pre(x)
        x = self.blocks(x)
        x = self.norm(x)

        # Pool: latent attention pool if configured, else the cls token.
        if self.attn_pool is not None:
            x = self.attn_pool(x)
        else:
            x = x[:, 0]

        return x

    def patch_for_vla(self):
        """Switch this instance to ``vla_forward`` and drop modules it no
        longer needs (final block, final norm, attention pool)."""
        self.__org_forward = self.forward
        self.forward = self.vla_forward
        # vla_forward (default last_n_layer=2) uses blocks[0 .. depth-2],
        # so the last block can be deleted to save memory.
        del self.blocks[-1]
        del self.norm
        if hasattr(self, "attn_pool") and self.attn_pool is not None:
            del self.attn_pool

    def patch_for_vla(self):
        self.__org_forward = self.forward
        self.forward = self.vla_forward
        del self.blocks[-1]
        del self.norm
        if hasattr(self, "attn_pool") and self.attn_pool is not None:
            del self.attn_pool


class VLAProjector(nn.Module):
    """Projects vision-encoder patch features into the LLM embedding space
    via a three-layer MLP (GELU after the first two layers, linear output).
    """

    def __init__(self, vision_dim: int, llm_dim: int) -> None:
        super().__init__()

        self.vision_dim, self.llm_dim = vision_dim, llm_dim

        # Widen to 4x the vision dim before projecting down to the LLM dim.
        initial_projection_dim = 4 * vision_dim
        self.fc1 = nn.Linear(self.vision_dim, initial_projection_dim, bias=True)
        self.fc2 = nn.Linear(initial_projection_dim, self.llm_dim, bias=True)
        self.fc3 = nn.Linear(self.llm_dim, self.llm_dim, bias=True)
        self.act_fn1 = nn.GELU()
        self.act_fn2 = nn.GELU()

    def forward(self, img_patches: torch.Tensor) -> torch.Tensor:
        """(..., vision_dim) -> (..., llm_dim)."""
        projected_features = self.fc1(img_patches)
        projected_features = self.act_fn1(projected_features)
        projected_features = self.fc2(projected_features)
        projected_features = self.act_fn2(projected_features)
        projected_features = self.fc3(projected_features)
        return projected_features
