from typing import List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer
from mmcls.models.utils import resize_pos_embed


class CustomLinear(nn.Module):
    """Prompt-aware qkv projection wrapping a frozen qkv linear layer.

    Each input token carries ``embed_dims`` feature channels plus
    ``prompt_length`` extra prompt channels. The original (frozen) qkv
    linear consumes the first ``embed_dims`` channels; a small trainable
    matrix maps the prompt channels into the same qkv space, and the two
    projections are summed.

    Args:
        source_linear (nn.Linear): Pretrained qkv projection mapping
            ``embed_dims`` -> ``embed_dims * 3``.
        prompt_length (int): Number of extra prompt channels per token.
        embed_dims (int): Token embedding dimension of the backbone.
        num_heads (int): Number of attention heads used to reshape the
            qkv output. Defaults to 12 (ViT-Base; the original code
            hard-coded ``12`` and a head dim of ``64``).
    """

    def __init__(self, source_linear, prompt_length, embed_dims,
                 num_heads=12):
        super().__init__()
        if embed_dims % num_heads != 0:
            raise ValueError(
                f'embed_dims ({embed_dims}) must be divisible by '
                f'num_heads ({num_heads})')
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.head_dims = embed_dims // num_heads
        # Frozen pretrained qkv projection (shared, not copied).
        self.ori_qkv = source_linear
        # Trainable projection for the appended prompt channels.
        qkv_extra_params = torch.empty(prompt_length, embed_dims * 3)
        bias = torch.empty(embed_dims * 3)
        nn.init.normal_(qkv_extra_params, std=0.02)
        nn.init.normal_(bias, std=0.02)
        self.qkv_extra = nn.Parameter(qkv_extra_params)
        self.bias = nn.Parameter(bias)

    def forward(self, x):
        """Project prompted tokens to multi-head qkv.

        Args:
            x (Tensor): Shape ``(B, N, embed_dims + prompt_length)``.

        Returns:
            Tensor: Shape ``(3, B, num_heads, N, head_dims)`` — q, k and v
            stacked along the leading dimension.
        """
        B, N, _ = x.shape
        # Frozen projection of the original feature channels.
        ori_qkv = self.ori_qkv(x[:, :, :self.embed_dims])
        # Trainable projection of the prompt channels only.
        qkv_extra = x[:, :, self.embed_dims:] @ self.qkv_extra + self.bias
        qkv = ori_qkv + qkv_extra
        qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dims)
        return qkv.permute(2, 0, 3, 1, 4)

@BACKBONES.register_module()
class DepthVPT(VisionTransformer):
    """Vision Transformer with deep visual-prompt tuning.

    The pretrained backbone is frozen; learnable prompt *channels* are
    concatenated to the token features (along the channel dimension) at
    the selected layers, and a ``CustomLinear`` per prompted layer mixes
    the prompt channels into the frozen qkv projection.

    Args:
        prompt_length (int): Number of extra prompt channels appended to
            every token. Defaults to 1.
        prompt_layers (list[int], optional): Indices of transformer layers
            to prompt. Defaults to all layers when ``None``.
        prompt_pos (str): Prompt placement strategy. Only ``'postpend'``
            (channel concatenation) is implemented; any other value raises
            ``ValueError`` (the original code silently hit a ``NameError``
            in ``forward`` instead).
        prompt_init (str): Unused placeholder kept for config
            compatibility.
    """

    def __init__(self,
                 prompt_length: int = 1,
                 prompt_layers: Optional[List[int]] = None,
                 prompt_pos: str = 'postpend',
                 prompt_init: str = 'normal',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Freeze the entire pretrained backbone; only the prompts and the
        # CustomLinear extras below remain trainable.
        for param in self.parameters():
            param.requires_grad = False

        # Fail fast: forward() only implements channel-concatenated prompts.
        if prompt_pos != 'postpend':
            raise ValueError(
                f"Unsupported prompt_pos '{prompt_pos}'; "
                "only 'postpend' is implemented.")

        self.prompt_layers = (list(range(len(self.layers)))
                              if prompt_layers is None else prompt_layers)
        # Map layer index -> slot in the prompt bank, so sparse
        # prompt_layers (e.g. [5, 7]) index the bank correctly. (The
        # original code indexed the bank by layer index directly, which is
        # out of bounds for sparse layer lists.)
        self._prompt_slot = {
            layer: slot for slot, layer in enumerate(self.prompt_layers)
        }

        # One prompt-aware qkv projection per prompted layer, wrapping the
        # frozen original qkv weights of that layer.
        for layer in self.prompt_layers:
            linear_layer = CustomLinear(self.layers[layer].attn.qkv,
                                        prompt_length, self.embed_dims)
            self.add_module(f'customlinear_{layer}', linear_layer)

        # Prompt bank: one (tokens, prompt_length) slice per prompted layer.
        # 577 tokens = 576 patches + 1 CLS token.
        # NOTE(review): assumes a fixed 24x24 patch grid (e.g. 384x384
        # input with 16x16 patches) -- TODO confirm against the configs
        # this backbone is deployed with.
        prompt = torch.empty(len(self.prompt_layers), 577, prompt_length)
        nn.init.normal_(prompt, std=0.02)
        self.prompt = nn.Parameter(prompt, requires_grad=True)
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos

    def _prompted_attention(self, attention_layer, layer_idx, x):
        """Multi-head self-attention using the prompt-aware qkv projection.

        Mirrors the mmcls MultiheadAttention forward pass, but computes
        qkv through ``customlinear_{layer_idx}`` so the appended prompt
        channels contribute to q, k and v.

        Args:
            attention_layer: The frozen attention module of this layer
                (provides scale, dropouts, output projection).
            layer_idx (int): Transformer layer index.
            x (Tensor): Normalized tokens with prompt channels appended,
                shape ``(B, N, embed_dims + prompt_length)``.

        Returns:
            Tensor: Attention output of shape ``(B, N, embed_dims)``.
        """
        custom_linear = getattr(self, f'customlinear_{layer_idx}')
        B, N, _ = x.shape
        qkv = custom_linear(x)  # (3, B, num_heads, N, head_dims)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * attention_layer.scale
        attn = attn.softmax(dim=-1)
        attn = attention_layer.attn_drop(attn)

        out = (attn @ v).transpose(1, 2).reshape(
            B, N, attention_layer.embed_dims)
        out = attention_layer.proj(out)
        out = attention_layer.out_drop(attention_layer.proj_drop(out))

        if attention_layer.v_shortcut:
            out = v.squeeze(1) + out
        return out

    def forward(self, x):
        """Standard mmcls ViT forward with prompt channels injected.

        Returns a tuple with the CLS-token features of every layer listed
        in ``self.out_indices``.
        """
        B = x.shape[0]
        x, patch_resolution = self.patch_embed(x)

        # Prepend CLS token and add (resized) positional embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)  # (B, 577, embed_dims)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        # prompt: (num_prompt_layers, B, 577, prompt_length)
        prompt = self.prompt.unsqueeze(1).expand(-1, B, -1, -1)

        outs = []
        for i, layer in enumerate(self.layers):
            if i in self.prompt_layers:
                # Append prompt channels: (B, 577, embed_dims + prompt_length)
                slot = self._prompt_slot[i]
                x_prompted = torch.cat([x, prompt[slot]], dim=2)
                # LayerNorm only the real feature channels; pass prompt
                # channels through untouched.
                norm_x = torch.cat(
                    [layer.norm1(x_prompted[:, :, :self.embed_dims]),
                     x_prompted[:, :, self.embed_dims:]], dim=2)
                # Residual connects to the un-prompted x.
                x = x + self._prompted_attention(layer.attn, i, norm_x)
                x = layer.ffn(layer.norm2(x), identity=x)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            if i in self.out_indices:
                outs.append(x[:, 0])  # CLS-token feature

        return tuple(outs)
