from timm.models.vision_transformer_hybrid import vit_base_r26_s32_224
import torch
from torch import einsum
from torch._C import dtype
import torch.nn as nn
from torch import Tensor
import numpy as np
import timm.models as timmmodels
import math
from einops import rearrange

# Public API of this module.
__all__ = ['Attention', 'deit_extractor', 'vit_extractor']

class vit_extractor(nn.Module):
    """Feature extractor that returns intermediate token sequences from a timm ViT.

    Wraps a pretrained ``vit_base_patch16_224`` / ``vit_base_patch16_384``
    backbone and collects the hidden states produced after each transformer
    block whose index appears in ``block_index``.
    """

    def __init__(self, backbone='vit_base_patch16_224', pretrained=True, block_index=None):
        """
        Args:
            backbone: timm model name; must be one of the two supported ViTs.
            pretrained: load pretrained weights via timm.
            block_index: indices of transformer blocks whose outputs are
                returned (defaults to [2, 6, 10]).
        """
        super(vit_extractor, self).__init__()
        assert backbone in ['vit_base_patch16_224', 'vit_base_patch16_384'], 'Backbone should be vit_base_patch16_224 or vit_base_patch16_384.'
        model = getattr(timmmodels, backbone)(pretrained=pretrained)
        model.eval()
        self.pos_embed = model.pos_embed
        self.patch_embed = model.patch_embed
        self.pos_drop = model.pos_drop
        self.blocks = model.blocks
        self.cls_token = model.cls_token
        self.dist_token = model.dist_token
        # Bug fix: the original signature used a mutable default argument
        # (block_index=[2, 6, 10]) shared across all instances.  Use a None
        # sentinel and store a defensive copy instead.
        self.block_index = [2, 6, 10] if block_index is None else list(block_index)

    def forward(self, x):
        """Return the list of token tensors after each selected block."""
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = self.pos_drop(x + self.pos_embed)

        # Set membership is O(1) and avoids mutating a copied list in the loop.
        wanted = set(self.block_index)
        feature_list = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i in wanted:
                feature_list.append(x)

        return feature_list

class deit_extractor(nn.Module):
    """Feature extractor over the first ``depth`` blocks of a distilled DeiT.

    Returns a spatial feature map (B, embed_dim, side, side) built from the
    patch tokens (class and distillation tokens are stripped).
    """

    def __init__(self, backbone='deit_base_distilled_patch16_384', depth=7, pretrained=True):
        """
        Args:
            backbone: timm model name; must be one of the two distilled DeiTs.
            depth: number of leading transformer blocks to keep.
            pretrained: load pretrained weights via timm.
        """
        super(deit_extractor, self).__init__()
        assert backbone in ['deit_base_distilled_patch16_384', 'deit_base_distilled_patch16_224'], 'Backbone should be deit_base_distilled_patch16_384 or deit_base_distilled_patch16_224.'
        model = getattr(timmmodels, backbone)(pretrained=pretrained)
        model.eval()
        self.embed_dim = model.embed_dim
        num_patches = model.patch_embed.num_patches
        # Patch grid is square for these backbones (e.g. 24x24 for 384 input).
        self.side = int(np.sqrt(num_patches))
        self.pos_embed = model.pos_embed
        self.patch_embed = model.patch_embed
        self.pos_drop = model.pos_drop
        self.blocks = model.blocks[:depth]
        self.cls_token = model.cls_token
        self.dist_token = model.dist_token
        self.depth = depth

    def forward(self, x: Tensor) -> Tensor:
        """Embed patches, run the truncated block stack, and reshape to a map."""
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_token = self.cls_token.expand(B, -1, -1)
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_token, dist_token, x), dim=1)

        x = x + self.pos_embed
        x = self.pos_drop(x)

        # self.blocks was already truncated to `depth` entries in __init__.
        for blk in self.blocks:
            x = blk(x)

        # Drop cls + dist tokens, move channels first, and fold tokens into a
        # (side, side) grid.  Use reshape (not view): the transposed tensor is
        # non-contiguous and reshape handles that robustly.
        return x[:, 2:].transpose(1, 2).reshape(B, self.embed_dim, self.side, self.side)

def pair(x):
    """Return x unchanged if it is a tuple, otherwise duplicate it as (x, x)."""
    if isinstance(x, tuple):
        return x
    return (x, x)

def expand_dim(t, dim, k):
    """Insert a broadcast axis of size k at position dim (returns an expanded view)."""
    t = t.unsqueeze(dim)
    new_shape = list(t.shape)
    new_shape[dim] = k
    return t.expand(new_shape)

def rel_to_abs(x):
    """Convert relative-position logits (b, h, l, 2l-1) to absolute (b, h, l, l).

    Uses the standard pad-flatten-reshape trick: padding shifts each row by one
    column, aligning relative offsets with absolute positions.
    """
    b, h, l, _ = x.shape
    opts = dict(device=x.device, dtype=x.dtype)
    # Append a zero column so each row has 2l entries.
    padded = torch.cat((x, torch.zeros((b, h, l, 1), **opts)), dim=-1)
    # Flatten rows, then pad by l-1 so the total length factors as (l+1)(2l-1).
    flat = padded.reshape(b, h, l * 2 * l)
    flat = torch.cat((flat, torch.zeros((b, h, l - 1), **opts)), dim=-1)
    # Reshaping staggers the rows; the slice keeps the absolute-position window.
    staggered = flat.reshape(b, h, l + 1, 2 * l - 1)
    return staggered[:, :, :l, (l - 1):]

def relative_logits_1d(q, rel_k):
    """Relative attention logits along the last spatial axis.

    Args:
        q: queries of shape (b, heads, h, w, d).
        rel_k: relative key embedding, one row per relative offset (2w-1 rows).

    Returns:
        Tensor of shape (b, heads, h, h, w, w); the new axis at position 3 is a
        broadcast copy since these logits do not depend on the other axis.
    """
    b, heads, h, w, _ = q.shape
    logits = einsum('b h x y d, r d -> b h x y r', q, rel_k)
    # Fold the row axis into the head axis so rel_to_abs works row-wise.
    logits = logits.reshape(b, heads * h, w, -1)
    logits = rel_to_abs(logits)
    logits = logits.reshape(b, heads, h, w, w)
    return expand_dim(logits, dim=3, k=h)

# positional embeddings
class AbsPosEmb(nn.Module):
    """Learned absolute 2D positional bias added to attention logits.

    Stores one embedding per row and per column; their broadcast sum gives a
    distinct embedding for every (row, col) position.
    """

    def __init__(
        self,
        fmap_size,
        dim_head
    ):
        super().__init__()
        h, w = pair(fmap_size)
        scale = dim_head ** -0.5
        self.height = nn.Parameter(torch.randn(h, dim_head) * scale)
        self.width = nn.Parameter(torch.randn(w, dim_head) * scale)

    def forward(self, q):
        # (h, 1, d) + (1, w, d) -> (h, w, d), then flatten positions to (h*w, d).
        emb = self.height.unsqueeze(1) + self.width.unsqueeze(0)
        emb = emb.reshape(-1, emb.shape[-1])
        # Dot every query against every positional embedding.
        return einsum('b h i d, j d -> b h i j', q, emb)

class RelPosEmb(nn.Module):
    """Learned 2D relative positional bias, factored into per-axis 1D embeddings.

    Width and height contributions are computed independently with
    ``relative_logits_1d`` and summed (BotNet-style relative attention).
    """

    def __init__(
        self,
        fmap_size,
        dim_head
    ):
        """
        Args:
            fmap_size: feature-map size; an int for square maps or an
                (height, width) tuple.
            dim_head: per-head channel dimension.
        """
        super().__init__()
        height, width = pair(fmap_size)
        scale = dim_head ** -0.5
        self.fmap_size = fmap_size
        # 2n-1 relative offsets per axis of length n.
        self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
        self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)

    def forward(self, q):
        # Bug fix: the original did `h = w = self.fmap_size`, which breaks for
        # the tuple sizes that __init__ already accepts via pair(); unpack the
        # same way here.
        h, w = pair(self.fmap_size)

        q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w)
        rel_logits_w = relative_logits_1d(q, self.rel_width)
        rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)')

        # Transpose the spatial axes to reuse the 1D routine along the height.
        q = rearrange(q, 'b h x y d -> b h y x d')
        rel_logits_h = relative_logits_1d(q, self.rel_height)
        rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)')
        return rel_logits_w + rel_logits_h

## Attention layers
class Attention(nn.Module):
    def __init__(
        self,
        *,
        dim,
        fmap_size,
        heads = 4,
        dim_head = 128,
        rel_pos_emb = False
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = heads * dim_head

        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False)

        rel_pos_class = AbsPosEmb if not rel_pos_emb else RelPosEmb
        self.pos_emb = rel_pos_class(fmap_size, dim_head)

    def forward(self, fmap):
        heads, b, c, h, w = self.heads, *fmap.shape

        q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
        q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))

        q *= self.scale

        sim = einsum('b h i d, b h j d -> b h i j', q, k)
        sim += self.pos_emb(q)

        attn = sim.softmax(dim = -1)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
        return out

if __name__ == '__main__':
    # Smoke test: run a dummy batch through the ViT extractor and inspect shapes.
    dummy = torch.rand((32, 3, 224, 224))
    extractor = vit_extractor()
    features = extractor(dummy)
    print(len(features))
    for feat in features:
        print(feat.shape)
        # Channels-first layout, then drop the first (class) token.
        tokens = feat.transpose(1, 2)[..., 1:]
        print(tokens.shape)