import torch
import torch.nn as nn
from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer
from mmcls.models.utils import resize_pos_embed
from typing import List


from mmcv.runner.base_module import BaseModule
from mmcv.cnn.bricks.registry import DROPOUT_LAYERS


class MultiheadCrossAttention(BaseModule):
    """Multi-head cross-attention.

    Queries are projected from ``x`` while keys and values are projected
    from ``cat_x``, so the two inputs may have different lengths along the
    attention axis (dim ``-2``). Attention is computed over the last two
    dimensions of the inputs; any leading dimensions are treated as batch
    dimensions.

    Args:
        embed_dims (int): Output dimension of the q/k/v projections.
            Must be divisible by ``num_heads``.
        num_heads (int): Number of attention heads. Defaults to 1.
        input_dims (int, optional): Input feature dimension of ``x`` and
            ``cat_x``. Defaults to ``embed_dims`` when ``None``.
        attn_drop (float): Dropout rate applied to the attention map.
        proj_drop (float): Dropout rate applied after the output projection.
        dropout_layer (dict): Config for the final dropout layer, built via
            the mmcv ``DROPOUT_LAYERS`` registry.
        qkv_bias (bool): Whether the q/k/v projections use a bias term.
        qk_scale (float, optional): Override for the default attention
            scale ``head_dims ** -0.5``.
        proj_bias (bool): Whether the output projection uses a bias term.
        v_shortcut (bool): Add the value projection to the output as a
            residual. Only valid when the query and key/value sequences
            have the same length along dim ``-2``.
        init_cfg (dict, optional): Init config forwarded to ``BaseModule``.
    """

    def __init__(self,
                    embed_dims,
                    num_heads=1,
                    input_dims=None,
                    attn_drop=0.,
                    proj_drop=0.,
                    dropout_layer=dict(type='Dropout', drop_prob=0.),
                    qkv_bias=True,
                    qk_scale=None,
                    proj_bias=True,
                    v_shortcut=False,
                    init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        # Fail fast on invalid head configuration instead of silently
        # truncating head_dims via integer division.
        if embed_dims % num_heads != 0:
            raise ValueError(
                f'embed_dims ({embed_dims}) must be divisible by '
                f'num_heads ({num_heads})')

        self.input_dims = input_dims or embed_dims
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.v_shortcut = v_shortcut

        self.head_dims = embed_dims // num_heads
        self.scale = qk_scale or self.head_dims**-0.5

        # Separate q/k/v projections (instead of a fused qkv Linear)
        # because queries and key/values come from different tensors.
        self.q = nn.Linear(self.input_dims, embed_dims, bias=qkv_bias)
        self.k = nn.Linear(self.input_dims, embed_dims, bias=qkv_bias)
        self.v = nn.Linear(self.input_dims, embed_dims, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

        self.out_drop = DROPOUT_LAYERS.build(dropout_layer)

    def _split_heads(self, t):
        """(*, L, embed_dims) -> (*, num_heads, L, head_dims).

        With ``num_heads == 1`` this is a pure reshape and the computation
        is identical to single-head attention.
        """
        t = t.reshape(*t.shape[:-1], self.num_heads, self.head_dims)
        return t.transpose(-3, -2)

    def forward(self, x, cat_x):
        """Attend from ``x`` (queries) over ``cat_x`` (keys/values).

        Args:
            x (Tensor): Query input, shape ``(*, Lq, input_dims)``.
            cat_x (Tensor): Key/value input, shape ``(*, Lk, input_dims)``.

        Returns:
            Tensor: Output of shape ``(*, Lq, embed_dims)`` with a trailing
            size-1 dimension squeezed away (so ``(*, Lq)`` when
            ``embed_dims == 1``).
        """
        q = self.q(x)        # (*, Lq, embed_dims)
        k = self.k(cat_x)    # (*, Lk, embed_dims)
        v = self.v(cat_x)    # (*, Lk, embed_dims)

        qh = self._split_heads(q)   # (*, H, Lq, head_dims)
        kh = self._split_heads(k)   # (*, H, Lk, head_dims)
        vh = self._split_heads(v)   # (*, H, Lk, head_dims)

        # Scaled dot-product attention per head.
        attn = (qh @ kh.transpose(-2, -1)) * self.scale   # (*, H, Lq, Lk)
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        out = attn @ vh                    # (*, H, Lq, head_dims)
        # Merge heads back: (*, H, Lq, head_dims) -> (*, Lq, embed_dims).
        out = out.transpose(-3, -2)
        out = out.reshape(*out.shape[:-2], self.embed_dims)

        out = self.proj(out)
        out = self.out_drop(self.proj_drop(out))

        if self.v_shortcut:
            # Residual from the value projection; requires Lq == Lk.
            out = v + out

        return out.squeeze(-1)





@BACKBONES.register_module()
class ATTNIMT(VisionTransformer):
    """Frozen Vision Transformer with learnable prompts fused by
    cross-attention.

    All pretrained backbone parameters are frozen; the only trainable
    parameters are the per-layer prompt tensor and the shared
    ``MultiheadCrossAttention`` operator. At each prompted layer the
    prompt channels are appended to every token's feature vector and
    fused back into the original channel width via cross-attention.

    Args:
        prompt_length (int): Number of prompt channels appended to each
            token. Defaults to 1.
        prompt_layers (List[int], optional): Indices of the transformer
            layers that receive a prompt. ``None`` means every layer.
        prompt_pos (str): Prompt insertion mode. Only ``'postpend'``
            (append along the channel dim) is implemented; other values
            leave the layer unprompted.
        prompt_init (str): Prompt initialization scheme, one of
            'uniform', 'zero', 'kaiming', 'token' or 'normal' (default).
    """

    def __init__(self,
                 prompt_length: int = 1,
                 prompt_layers: List[int] = None,
                 prompt_pos: str = 'postpend',
                 prompt_init: str = 'normal',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)

        # Freeze the entire pretrained backbone; only the prompt
        # parameters registered below stay trainable.
        for param in self.parameters():
            param.requires_grad = False

        # Default: prompt every layer. Uses the actual depth instead of a
        # hard-coded ``range(12)`` so non-base ViT configs also work.
        self.prompt_layers = (list(range(len(self.layers)))
                              if prompt_layers is None else prompt_layers)

        # One prompt per prompted layer:
        # [num_prompt_layers, num_tokens, prompt_length].
        # num_tokens is read from the position embedding (cls token +
        # patches; 577 for a 224x224 input with patch size 16).
        num_tokens = self.pos_embed.shape[1]
        prompt = torch.empty(
            len(self.prompt_layers), num_tokens, prompt_length)

        if prompt_init == 'uniform':
            nn.init.uniform_(prompt, -0.08, 0.08)
        elif prompt_init == 'zero':
            nn.init.zeros_(prompt)
        elif prompt_init == 'kaiming':
            nn.init.kaiming_normal_(prompt)
        elif prompt_init == 'token':
            nn.init.zeros_(prompt)
            # Marker consumed elsewhere for lazy, token-based init.
            self.prompt_initialized = False
        else:
            nn.init.normal_(prompt, std=0.02)
        self.prompt = nn.Parameter(prompt, requires_grad=True)
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos

        # A single cross-attention operator, shared by all prompted
        # layers, acting on the appended prompt channel(s).
        self.operator = MultiheadCrossAttention(prompt_length)

    def forward(self, x):
        """mmcls ViT forward pass with per-layer prompt fusion."""
        B = x.shape[0]  # batch size
        x, patch_resolution = self.patch_embed(x)

        # Prepend the class token and add (resized) position embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)  # (B, N, C)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        # prompt: [num_prompt_layers, B, num_tokens, prompt_length]
        prompt = self.prompt.unsqueeze(1).expand(-1, B, -1, -1)

        outs = []
        for i, layer in enumerate(self.layers):
            # Both conditions checked together: previously a layer listed
            # in prompt_layers with a non-'postpend' mode hit an
            # undefined ``cat_x`` (NameError); now it falls through to a
            # plain layer call.
            if i in self.prompt_layers and self.prompt_pos == 'postpend':
                # Index the prompt by its slot in prompt_layers, not by
                # the layer index, so sparse configs (e.g. [3, 5]) pick
                # the correct prompt.
                slot = self.prompt_layers.index(i)
                # Append prompt channels: (B, N, C) -> (B, N, C + L).
                cat_x = torch.cat([x, prompt[slot]], dim=2)
                # Trailing singleton dim makes the channel axis the
                # attention sequence axis inside the operator.
                cat_x = cat_x.unsqueeze(-1)
                # Cross-attend the original C channels over all C + L
                # channels, restoring shape (B, N, C).
                f_x = self.operator(cat_x[:, :, :self.embed_dims, :], cat_x)
                x = layer(f_x)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            if i in self.out_indices:
                outs.append(x[:, 0])  # class-token feature

        return tuple(outs)