import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import Final

from timm.models.vision_transformer import Mlp, RmsNorm, LayerScale, DropPath
from timm.layers import use_fused_attn

import copy

class CrossAttention(nn.Module):
    """Multi-head cross-attention (adapted from timm's Attention).

    Queries are derived from ``x``; keys and values from the conditioning
    sequence ``c``. Uses fused scaled-dot-product attention when available,
    otherwise an explicit softmax-attention fallback.
    """
    fused_attn: Final[bool]

    def __init__(self,
        dim: int, num_heads: int = 8,
        qkv_bias: bool = False, qk_norm: bool = True,
        attn_drop: float = 0., proj_drop: float = 0.,
        norm_layer: nn.Module = RmsNorm,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'

        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.fused_attn = use_fused_attn()
        if not self.fused_attn:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")

        # Separate projections: queries from x, keys/values jointly from c.
        self.q = nn.Linear(dim, dim, bias = qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias = qkv_bias)
        # Optional per-head normalization of q and k before the dot product.
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias = qkv_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """Attend from tokens in ``x`` (B, N, C) over tokens in ``c`` (B, L, C)."""
        B, N, C = x.shape
        L = c.shape[1]

        # q: (B, heads, N, head_dim); k, v: (B, heads, L, head_dim).
        q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        kv = self.kv(c).reshape(B, L, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]
        q = self.q_norm(q)
        k = self.k_norm(k)

        if self.fused_attn:
            drop_p = self.attn_drop.p if self.training else 0.
            out = F.scaled_dot_product_attention(q, k, v, dropout_p = drop_p)
        else:
            scores = (q * self.scale) @ k.transpose(-2, -1)
            weights = self.attn_drop(F.softmax(scores, dim = -1))
            out = weights @ v

        # Merge heads back to (B, N, C) and project.
        out = out.transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))




class SelfAttention(nn.Module):
    """Multi-head self-attention with optional attention mask (adapted from timm).

    Bug fix: the non-fused fallback previously applied ``masked_fill`` even
    when ``mask`` was ``None`` (the default), which raises a TypeError; the
    mask is now applied only when one is provided, matching the fused path.
    """
    fused_attn: Final[bool]

    def __init__(self,
        dim: int, num_heads: int = 8,
        qkv_bias: bool = False, qk_norm: bool = True,
        attn_drop: float = 0., proj_drop: float = 0.,
        norm_layer: nn.Module = RmsNorm,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'

        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.fused_attn = use_fused_attn()

        if not self.fused_attn:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")

        # Fused q/k/v projection; optional per-head q/k normalization.
        self.qkv = nn.Linear(dim, dim * 3, bias = qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias = qkv_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, 
        x: torch.Tensor, 
        mask: torch.Tensor | None = None
    ) -> torch.Tensor:
        """Apply self-attention over ``x`` (B, N, C).

        ``mask`` follows SDPA boolean semantics: True = attend, False = block.
        """
        B, N, C = x.shape
        # q, k, v each: (B, heads, N, head_dim).
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        q, k = self.q_norm(q), self.k_norm(k)

        if self.fused_attn:
            x = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p = self.attn_drop.p if self.training else 0.,
                attn_mask = mask
            )

        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            if mask is not None:
                # Assumes a boolean mask (True = keep); float additive masks
                # would need `attn + mask` instead — TODO confirm callers.
                attn = attn.masked_fill(mask == False, float('-inf'))
            attn = F.softmax(attn, dim = -1)
            attn = self.attn_drop(attn)
            x = attn @ v

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class EncoderBlock(nn.Module):
    """Pre-norm transformer encoder block (adapted from timm).

    Forward pass: ``x + DropPath(LS(Attn(Norm(x))))`` followed by
    ``x + DropPath(LS(MLP(Norm(x))))``. LayerScale and stochastic depth
    are identity unless ``init_values`` / ``drop_path`` are set.
    """

    def __init__(self, 
        dim, num_heads, 
        qkv_bias = True, qk_norm = True, 
        proj_drop = 0, attn_drop = 0, init_values = None, drop_path = 0, 
        act_layer = nn.GELU, norm_layer = RmsNorm, 
        mlp_ratio = 4, mlp_layer = Mlp, mlp_bias = False
    ):
        super().__init__()

        # --- attention sub-block ---
        self.norm1 = norm_layer(dim)
        self.attn = SelfAttention(
            dim,
            num_heads = num_heads,
            qkv_bias = qkv_bias,
            qk_norm = qk_norm,
            attn_drop = attn_drop,
            proj_drop = proj_drop,
            norm_layer = norm_layer,
        )
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # --- feed-forward sub-block ---
        self.norm2 = norm_layer(dim)
        self.mlp = mlp_layer(
            in_features = dim,
            hidden_features = int(dim * mlp_ratio),
            act_layer = act_layer,
            drop = proj_drop,
            bias = mlp_bias,
        )
        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x, mask = None):
        # Residual attention branch (mask is forwarded to self-attention).
        attn_out = self.attn(self.norm1(x), mask = mask)
        x = x + self.drop_path1(self.ls1(attn_out))
        # Residual MLP branch.
        mlp_out = self.mlp(self.norm2(x))
        x = x + self.drop_path2(self.ls2(mlp_out))
        return x


class DecoderBlock(nn.Module):
    """Pre-norm transformer decoder block: self-attention, cross-attention, MLP.

    Bug fix: ``init_values`` and ``drop_path`` were accepted but silently
    ignored. They now enable LayerScale / stochastic depth on each residual
    branch, consistent with ``EncoderBlock``. The defaults (``None`` / 0)
    reduce every added module to ``nn.Identity``, preserving the original
    behavior and adding no parameters to the state dict.
    """
    def __init__(self, 
        dim, num_heads, 
        qkv_bias = True, qk_norm = True, 
        proj_drop = 0, attn_drop = 0, init_values = None, drop_path = 0, 
        act_layer = nn.GELU, norm_layer = RmsNorm, 
        mlp_ratio = 4, mlp_layer = Mlp, mlp_bias = False
    ):
        super().__init__()

        # Self-attention branch.
        self.norm1 = norm_layer(dim)
        self.self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_norm, attn_drop, proj_drop, norm_layer)
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # Cross-attention branch (conditioning sequence c provides k/v).
        self.norm2 = norm_layer(dim)
        self.cross_attn = CrossAttention(dim, num_heads, qkv_bias, qk_norm, attn_drop, proj_drop, norm_layer)
        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        # Feed-forward branch.
        self.norm3 = norm_layer(dim)
        self.mlp = mlp_layer(
            in_features = dim,
            hidden_features = int(dim * mlp_ratio),
            act_layer = act_layer,
            drop = proj_drop,
            bias = mlp_bias
        )
        self.ls3 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path3 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x, c, mask = None):
        """Decode tokens ``x`` conditioned on ``c``.

        ``mask`` (optional, backward-compatible addition) is forwarded to the
        self-attention, e.g. for causal decoding; ``None`` keeps the original
        unmasked behavior.
        """
        x = x + self.drop_path1(self.ls1(self.self_attn(self.norm1(x), mask = mask)))
        x = x + self.drop_path2(self.ls2(self.cross_attn(self.norm2(x), c)))
        x = x + self.drop_path3(self.ls3(self.mlp(self.norm3(x))))

        return x

class TransformerEncoder(nn.Module):
    """Stack of ``num_layers`` deep-copied encoder layers applied sequentially.

    Each clone is an independent module (own parameters), produced by
    ``_get_clones`` from the single prototype ``encoder_layer``.
    """

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)

    def forward(self, x):
        # Thread the activations through every layer in order.
        for blk in self.layers:
            x = blk(x)
        return x

class TransformerDecoder(nn.Module):
    """Stack of ``num_layers`` deep-copied decoder layers applied sequentially.

    Every layer receives the running decoder state plus the same
    conditioning sequence ``c``.
    """

    def __init__(self, decoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)

    def forward(self, x, c):
        # Each layer attends over its own output so far and the context c.
        for blk in self.layers:
            x = blk(x, c)
        return x


class PatchEmbed(nn.Module):
    """2D image to patch embedding via a strided convolution.

    Splits a BCHW image into non-overlapping ``patch_size`` x ``patch_size``
    patches and linearly projects each one to ``embed_dim``. With
    ``flatten=True`` the spatial grid is collapsed into a token sequence
    of shape (B, N, embed_dim).
    """
    def __init__(
        self,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        norm_layer = None,
        flatten: bool = True,
        bias: bool = False,
    ):
        super().__init__()
        self.flatten = flatten
        # A conv whose kernel equals its stride is a per-patch linear projection.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        y = self.proj(x)
        if self.flatten:
            # BCHW -> BNC token sequence.
            y = y.flatten(2).transpose(1, 2)
        return self.norm(y)
  

def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
