import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange

from .vit_modules import *
from .pos_embed import get_nd_sincos_pos_embed, resample_abs_pos_embed
from data_utils.data_config import DataManager

class PatchEmbedNd(nn.Linear):
    '''Patch embedding with fixed (non-trainable) sin-cos positional embeddings.

    Patchifies an N-d input, projects patches through a shared linear layer
    whose input channels are sliced per modality, then adds a per-modality
    sin-cos positional embedding precomputed from each modality's grid size.
    '''
    def __init__(self, manager: DataManager, in_chans: int, out_chans: int):
        super().__init__(in_chans, out_chans)
        self.manager = manager
        ## One fixed sin-cos pos embedding per modality, shaped
        ## (1, num_patches, out_chans); frozen via requires_grad=False.
        self.all_embeds = nn.ParameterList()
        for m in manager:
            embed = nn.Parameter(
                torch.from_numpy(get_nd_sincos_pos_embed(out_chans, m.grid_size)).float().unsqueeze(0),
                requires_grad=False,
            )
            self.all_embeds.append(embed)

    def forward(self, x, m_idx):
        '''Patchify ``x`` for modality ``m_idx``, project, add pos embeds.

        Args:
            x: tensor of shape (B, C, *spatial); each spatial dim is assumed
               divisible by the modality's patch size along that dim.
            m_idx: index of the modality in ``self.manager``.

        Returns:
            Tensor of shape (B, num_patches, out_chans).
        '''
        x_grid = x.shape[2:]
        d = len(x_grid)
        if d < 1:
            raise NotImplementedError('input must have at least one spatial dimension')
        m = self.manager[m_idx]
        lo, hi = m.get_range()
        p, c = m.patch_size, m.num_channels
        g = [x_l // p_l for x_l, p_l in zip(x_grid, p)]

        # Patchify — generic N-d form of
        # 'b c (g0 p0) ... (gk pk) -> b (g0 ... gk) (c p0 ... pk)'.
        spatial = ' '.join(f'(g{i} p{i})' for i in range(d))
        grid_axes = ' '.join(f'g{i}' for i in range(d))
        patch_axes = ' '.join(f'p{i}' for i in range(d))
        pattern = f'b c {spatial} -> b ({grid_axes}) (c {patch_axes})'
        sizes = {'c': c}
        for i in range(d):
            sizes[f'g{i}'] = g[i]
            sizes[f'p{i}'] = p[i]
        x = rearrange(x, pattern, **sizes)

        # Slice the shared projection weight to this modality's channel range.
        weight = self.weight[:, lo:hi]
        x_proj = F.linear(x, weight, self.bias)

        pos_embed = self.all_embeds[m_idx]
        if tuple(m.grid_size) != tuple(g):
            # Input resolution differs from the precomputed grid — resample
            # the positional embedding to the actual grid.
            pos_embed = resample_abs_pos_embed(pos_embed, g, old_size=m.grid_size, num_prefix_tokens=0)
        x_proj = x_proj + pos_embed
        return x_proj


class VisionTransformer(nn.Module):
    '''Multi-modality ViT encoder: patch embed + cls token + transformer blocks.

    Tokens are produced by :class:`PatchEmbedNd` (which adds positional
    embeddings), prefixed with a learnable cls token, and passed through
    ``depth`` transformer blocks followed by a final LayerNorm.
    '''
    def __init__(self, manager, in_chans, embed_dim, depth, num_heads, mlp_ratio=4.0, qkv_bias=True, qk_norm=False,  init_values=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.):
        super().__init__()
        self.manager = manager
        self.embed_dim = embed_dim
        self.patch_embed = PatchEmbedNd(self.manager, in_chans, self.embed_dim)

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Stochastic depth decay rule: per-block drop-path rate rises
        # linearly from 0 to drop_path_rate over the depth of the network.
        dpr = [r.item() for r in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_norm=qk_norm,
                init_values=init_values,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
            )
            for i in range(depth)])

        self.norm = LayerNorm(embed_dim)
        self.init_weights()

    def init_weights(self):
        """ViT weight initialization (cls token + recursive module init)."""
        torch.nn.init.normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Per-module init: xavier-uniform Linear (per official JAX ViT), unit LayerNorm."""
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, m_idx):
        """Encode input ``x`` of modality ``m_idx``.

        Args:
            x: tensor of shape (B, C, *spatial) for modality ``m_idx``.
            m_idx: modality index passed through to the patch embedder.

        Returns:
            Normalized token sequence of shape (B, 1 + num_patches, embed_dim),
            with the cls token at position 0.
        """
        # Patchify to encoder tokens (positional embeddings added inside).
        x = self.patch_embed(x, m_idx)
        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.blocks(x)
        x = self.norm(x)
        return x
