import torch
import torch.nn as nn

from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer
from mmcls.models.utils import resize_pos_embed
from typing import List
from SOFT.kernel.subtraction import subtraction_gaussian_kernel
from SOFT.kernel.inverse import newton_inverse_kernel


from medfmc.models.LoRA.lora_layers import Linear

device = 'cuda:0'



def subtraction_gaussian_kernel_torch(q, k):
    """Pairwise squared Euclidean distances via the subtraction trick.

    Computes ||q_i - k_j||^2 = ||q_i||^2 + ||k_j||^2 - 2 * q_i . k_j for
    every row of ``q`` against every column of ``k`` using three matmuls,
    avoiding an explicit (N x M x C) difference tensor.

    Args:
        q (Tensor): Queries of shape (..., N, C).
        k (Tensor): Keys of shape (..., C, M) (already transposed).

    Returns:
        Tensor: Squared distances of shape (..., N, M).
    """
    # BUGFIX: the ones-matrices were allocated with .cuda(), which breaks
    # CPU execution, non-default GPUs, and non-fp32 dtypes. Allocate them
    # on the inputs' device with the inputs' dtype instead.
    ones_k = torch.ones(k.shape[-2:], device=q.device, dtype=q.dtype)
    ones_q = torch.ones(q.shape[-2:], device=q.device, dtype=q.dtype)
    # [..., N, C] @ [C, M] -> [..., N, M]: row-wise sum of q**2, broadcast.
    matA_square = q ** 2. @ ones_k
    # [N, C] @ [..., C, M] -> [..., N, M]: column-wise sum of k**2, broadcast.
    matB_square = ones_q @ k ** 2.
    return matA_square + matB_square - 2. * (q @ k)


@BACKBONES.register_module()
class SOFTIMT(VisionTransformer):
    """ViT backbone combining channel-appended prompt tuning with SOFT-style
    softmax-free low-rank attention on selected layers.

    The pretrained backbone weights are frozen in ``__init__``; the per-layer
    prompts are the only trainable parameters added here.

    Args:
        prompt_length (int): Number of prompt channels appended to the token
            feature dimension. Defaults to 1.
        prompt_layers (List[int], optional): Indices of transformer layers
            that receive prompts and the SOFT branch. Defaults to all 12.
        prompt_pos (str): Prompt placement strategy; only ``'postpend'``
            (concatenate along the channel dim, then fold back) is handled.
        prompt_init (str): Prompt init scheme: 'uniform', 'zero', 'kaiming',
            'token' (lazy init from first-batch token statistics), or any
            other value for normal(std=0.02) init.
    """

    def __init__(self,
                 prompt_length: int = 1,
                 prompt_layers: List[int] = None,
                 prompt_pos: str = 'postpend',
                 prompt_init: str = 'token',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Freeze the entire pretrained backbone; only the prompt parameter
        # created below remains trainable.
        for param in self.parameters():
            param.requires_grad = False

        self.prompt_layers = range(12) if prompt_layers is None else prompt_layers
        # NOTE(review): 577 = 1 cls token + 24*24 patch tokens, i.e. a
        # 384x384 input with 16x16 patches — confirm against the config.
        prompt = torch.empty(len(self.prompt_layers), 577, prompt_length)
        W2 = torch.empty(prompt_length, self.embed_dims)

        # BUGFIX: W2 was only initialized in the 'token' and fallback
        # branches, leaving torch.empty() garbage for 'uniform'/'zero'/
        # 'kaiming' even though W2 is used on every forward. Initialize it
        # to ones for every scheme (identical to the previous 'token' and
        # fallback behavior).
        nn.init.ones_(W2)
        if prompt_init == 'uniform':
            nn.init.uniform_(prompt, -0.08, 0.08)
        elif prompt_init == 'zero':
            nn.init.zeros_(prompt)
        elif prompt_init == 'kaiming':
            nn.init.kaiming_normal_(prompt)
        elif prompt_init == 'token':
            nn.init.zeros_(prompt)
            # Deferred init: filled from token statistics on first forward.
            self.prompt_initialized = False
        else:
            nn.init.normal_(prompt, std=0.02)
        self.prompt = nn.Parameter(prompt, requires_grad=True)
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos

        # Fixed (non-trainable) projection that folds the appended prompt
        # channels back into the embedding dimension.
        self.W2 = nn.Parameter(W2, requires_grad=False)
        self.max_iter = 20  # Newton iterations for the kernel inverse.
        self.num_heads = self.layers[0].attn.num_heads
        self.head_dims = self.layers[0].attn.head_dims
        self.Qnorm_act = nn.Sequential(nn.LayerNorm(self.head_dims), nn.GELU())
        self.kernel_function = subtraction_gaussian_kernel_torch

    def _soft_low_rank(self, x, attn_layer, rank=4):
        """Softmax-free low-rank (SOFT) attention branch.

        Args:
            x (Tensor): Token features of shape (B, N, embed_dims), where
                N = 1 cls token + H*W patch tokens.
            attn_layer: The layer's attention module; only its ``qkv``
                projection and head geometry are used.
            rank (int): Side length of the landmark patch; ``rank**2``
                landmark tokens are sampled from the patch grid.

        Returns:
            Tensor: Branch output of shape (B, N, embed_dims).
        """
        B, N, embed_dims = x.shape
        qkv = attn_layer.qkv(x)
        qkv = qkv.reshape(B, N, 3, attn_layer.num_heads,
                          attn_layer.head_dims).permute(2, 0, 3, 1, 4)
        Q, K, V = qkv[0], qkv[1], qkv[2]  # each (B, heads, N, head_dims)

        b, nhead, N, headdim = Q.size()
        # NOTE(review): assumes a fixed 24x24 patch grid (N == 577);
        # confirm for other input resolutions.
        H, W = 24, 24
        # Landmarks: drop the cls token, reshape to the patch grid and keep
        # the top-left (rank x rank) corner as landmark tokens.
        Q_landmarks = Q[:, :, 1:, :].clone()
        Q_landmarks = (Q_landmarks.reshape(b * nhead, H * W, headdim)
                       .reshape(b * nhead, H, W, headdim)
                       .permute(0, 3, 1, 2))
        Q_landmarks = Q_landmarks[:, :, :rank, :rank].clone()
        Q_landmarks = (Q_landmarks.flatten(2).transpose(1, 2)
                       .reshape(b, nhead, -1, headdim))
        Q_landmarks = self.Qnorm_act(Q_landmarks)
        K_landmarks = Q_landmarks

        # Gaussian kernels exp(-||q - k||^2 / 2) via the subtraction trick.
        kernel_1_ = self.kernel_function(
            Q, K_landmarks.transpose(-1, -2).contiguous())
        kernel_1_ = torch.exp(-kernel_1_ / 2)  # (b, nhead, N, rank**2)

        kernel_2_ = self.kernel_function(
            Q_landmarks, K_landmarks.transpose(-1, -2).contiguous())
        kernel_2_ = torch.exp(-kernel_2_ / 2)  # (b, nhead, rank**2, rank**2)

        kernel_3_ = kernel_1_.transpose(-1, -2)  # (b, nhead, rank**2, N)

        # Nystrom-style reconstruction K1 @ K2^{-1} @ K1^T @ V, with the
        # inverse approximated by Newton iteration.
        X = torch.matmul(
            torch.matmul(kernel_1_,
                         newton_inverse_kernel(kernel_2_, self.max_iter)),
            torch.matmul(kernel_3_, V))
        return X.reshape(B, N, embed_dims)

    def forward(self, x):
        """Following mmcls VisionTransformer, plus prompts and SOFT branch.

        Args:
            x (Tensor): Input images of shape (B, C, H, W).

        Returns:
            tuple[Tensor]: Cls-token features, one entry per out_index.
        """
        B = x.shape[0]  # batch size
        x, patch_resolution = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)  # (B, 1 + H*W, embed_dims)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        # Lazy 'token' init: fill prompts with the mean token activation of
        # the first batch, once.
        if hasattr(self, 'prompt_initialized') and not self.prompt_initialized:
            with torch.no_grad():
                self.prompt.data += x.mean([0, 2]).detach().clone()[None, :, None]
            self.prompt_initialized = True

        # prompt: (num_prompt_layers, B, num_tokens, prompt_length)
        prompt = self.prompt.unsqueeze(1).expand(-1, x.shape[0], -1, -1)

        # BUGFIX: the folding matrix was rebuilt per prompt layer with a
        # hard-coded size (768) on a hard-coded device ('cuda:0'). Build it
        # once per forward, sized by embed_dims, on the module's device.
        W1 = torch.eye(self.embed_dims, device=self.W2.device, dtype=self.W2.dtype)
        # W = [I; W2]: identity keeps the original features, W2 folds the
        # prompt channels back into the embedding dimension.
        W = torch.cat((W1, self.W2), dim=0)

        # BUGFIX: prompts were indexed with the absolute layer index, which
        # mis-indexes (or raises IndexError) when prompt_layers is not a
        # 0..k prefix; map each layer index to its slot in the prompt tensor.
        layer_to_slot = {l: s for s, l in enumerate(self.prompt_layers)}

        outs = []
        for i, layer in enumerate(self.layers):
            if i in self.prompt_layers:
                if self.prompt_pos == 'postpend':
                    # Append prompt channels, then fold back:
                    # x <- [x | p] @ [I; W2], shape stays (B, N, embed_dims).
                    x = torch.cat([x, prompt[layer_to_slot[i]]], dim=2) @ W
                x = x + layer.attn(layer.norm1(x)) + self._soft_low_rank(x, layer.attn)
                x = layer.ffn(layer.norm2(x), identity=x)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            if i in self.out_indices:
                outs.append(x[:, 0])

        return tuple(outs)