import torch
import torch.nn as nn
from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer
from mmcls.models.utils import resize_pos_embed
from typing import List
from SOFT.kernel.subtraction import subtraction_gaussian_kernel
from SOFT.kernel.inverse import newton_inverse_kernel


from medfmc.models.LoRA.lora_layers import Linear


# NOTE(review): module-level device constant; appears unused in this file —
# the kernel/forward code calls .cuda() directly instead. Verify callers
# before removing.
device = 'cuda:0'



def subtraction_gaussian_kernel_torch(q, k):
    """Pairwise squared Euclidean distances between rows of ``q`` and columns of ``k``.

    Args:
        q: query tensor of shape ``[..., N, C]`` (e.g. ``[B, H, N, C]``).
        k: key tensor of shape ``[..., C, M]`` — i.e. keys already transposed
           so that each column is one key vector.

    Returns:
        Tensor of shape ``[..., N, M]`` with
        ``out[..., i, j] = ||q_i - k_j||**2``.
    """
    # ||q_i||^2 per row, kept as a [..., N, 1] column so it broadcasts over
    # the key axis. Summing directly replaces the previous ones-matrix
    # matmul, which both allocated needless temporaries and hard-coded
    # .cuda() (breaking CPU runs and non-default GPU placement).
    q_sq = (q ** 2).sum(dim=-1, keepdim=True)
    # ||k_j||^2 per column, kept as [..., 1, M] to broadcast over queries.
    k_sq = (k ** 2).sum(dim=-2, keepdim=True)
    # ||q_i - k_j||^2 = ||q_i||^2 + ||k_j||^2 - 2 * q_i . k_j
    return q_sq + k_sq - 2.0 * (q @ k)



@BACKBONES.register_module()
class SOFTVIT(VisionTransformer):
    """Vision Transformer backbone augmented with a SOFT-style (softmax-free)
    low-rank attention branch.

    All pretrained ViT parameters are frozen in ``__init__``; only the modules
    created after the freeze (``Qnorm_act``) remain trainable.
    """

    def __init__(self,
                 prompt_length: int = 1,
                 *args,
                 **kwargs):
        # NOTE(review): ``prompt_length`` is accepted but never used in this
        # file — confirm whether it is consumed elsewhere or is dead.
        super().__init__(*args, **kwargs)
        # Freeze every pretrained parameter; the modules registered below are
        # created after this loop and therefore stay trainable.
        for param in self.parameters():
            param.requires_grad = False

        # Per-head geometry, read off the first transformer layer's attention.
        self.num_heads, self.head_dims = self.layers[0].attn.num_heads,self.layers[0].attn.head_dims
        # Iteration budget for the Newton-style iterative matrix inverse
        # (newton_inverse_kernel) used inside forward().
        self.max_iter=20
        # self.Qlandmark_op = nn.Conv2d(self.head_dims, self.head_dims, kernel_size=8, stride=8, bias=False)
        # LayerNorm + GELU applied to landmark queries over the per-head
        # feature dimension.
        self.Qnorm_act = nn.Sequential(nn.LayerNorm(self.head_dims), nn.GELU())
        # Pairwise squared-distance kernel used to build the Gaussian
        # attention matrices in forward().
        self.kernel_function = subtraction_gaussian_kernel_torch


    
    def forward(self, x):
        """Forward pass following the mmcls VisionTransformer implementation,
        with a low-rank Gaussian-kernel attention term added as an extra
        residual branch in the first 12 layers.

        Args:
            x: input image batch. The shape comments below assume a 24x24
               patch grid (576 patch tokens + 1 cls token = 577) —
               TODO confirm for other input resolutions.

        Returns:
            tuple of cls-token features, one entry per index in
            ``self.out_indices``.
        """
        B = x.shape[0] # batch size
        x, patch_resolution = self.patch_embed(x)

        # stole cls_tokens impl from Phil Wang, thanks
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1) # x: (B,577,768)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens) # x: (B,577,768)
        x = self.drop_after_pos(x) # x: (B,577,768)


        outs = []


        for i, layer in enumerate(self.layers):
            # NOTE(review): hard-coded layer cut-off. For a 12-layer ViT-B
            # this covers every layer, making the else-branch dead — confirm
            # the intended depth.
            if i<12:
                def soft_low_rank(x, attn_layer, rank=3):
                    '''SOFT-style low-rank softmax-free attention branch.

                    Builds Gaussian kernels between the queries and a small
                    set of "landmark" queries, then approximates full
                    attention via the Nystrom-like product
                    kernel(Q, L) @ inv(kernel(L, L)) @ kernel(L, Q) @ V.

                        x: (B, 577,768)
                    '''
                    B, N, embed_dims = x.shape
                    qkv = attn_layer.qkv(x) # qkv:(4,578,2304)
                    qkv = qkv.reshape(B, N, 3, attn_layer.num_heads,attn_layer.head_dims).permute(2, 0, 3, 1, 4) # qkv:(3,4,12,578,64)
                    Q, K, V = qkv[0], qkv[1], qkv[2] #(4,12,577,64)

                    b, nhead, N, headdim = Q.size()
                    # NOTE(review): assumes a fixed 24x24 patch grid (576
                    # patch tokens); any other patch_resolution breaks the
                    # reshape below — confirm.
                    H,W = 24,24
                    Q_landmarks = Q[:,:,1:,:].clone() # drop the cls token; (4,12,576,64)
                    Q_landmarks = Q_landmarks.reshape(b*nhead, H*W, headdim).reshape(b*nhead, H, W, headdim).permute(0, 3, 1, 2) #(48,64,24,24)
                    # Q_landmarks = self.Qlandmark_op(Q_landmarks) #(48,64,3,3)
                    #============ select fixed Q landmarks: the top-left rank x rank patch grid ============
                    Q_landmarks = Q_landmarks[:,:,:rank,:rank].clone() #(48,64,3,3)
                    Q_landmarks = Q_landmarks.flatten(2).transpose(1, 2).reshape(b, nhead, -1, headdim) #(4,12,9,64)
                    #=======================================================================================

                    #=========== alternative (disabled): select the largest-activation Q landmarks ========
                    # Q_landmarks_flat = Q_landmarks.view(Q_landmarks.size(0), Q_landmarks.size(1), -1)
                    # topk_indices = Q_landmarks_flat.argsort(dim=-1, descending=True)[:, :, :rank*rank]
                    # result = torch.gather(Q_landmarks_flat, dim=-1, index=topk_indices)
                    # result = result.view(Q_landmarks.size(0), Q_landmarks.size(1), rank, rank)
                    # Q_landmarks = result.flatten(2).transpose(1, 2).reshape(b, nhead, -1, headdim) #(4,12,9,64)
                    #=======================================================================================

                    Q_landmarks = self.Qnorm_act(Q_landmarks) #(4,12,9,64)
                    # Symmetric kernel: the keys share the query landmarks.
                    K_landmarks = Q_landmarks #(4,12,9,64)

                    # Gaussian kernel between all queries and the landmarks.
                    kernel_1_ = self.kernel_function(Q, K_landmarks.transpose(-1, -2).contiguous())
                    kernel_1_ = torch.exp(-kernel_1_/2) #(4,12,577,9)

                    # Landmark-landmark kernel — the small matrix to invert.
                    kernel_2_ = self.kernel_function(Q_landmarks, K_landmarks.transpose(-1, -2).contiguous())
                    kernel_2_ = torch.exp(-kernel_2_/2) #(4,12,9,9)

                    kernel_3_ = kernel_1_.transpose(-1, -2) #(4,12,9,577)

                    # V = self.lora_value(V)

                    # Nystrom-style reconstruction of the attention output.
                    X = torch.matmul(torch.matmul(kernel_1_, newton_inverse_kernel(kernel_2_, self.max_iter)), torch.matmul(kernel_3_, V))

                    X = X.reshape(B, N, embed_dims)
                    return X
                # x = x + soft_low_rank(x,layer.attn)
                # Standard attention plus the low-rank SOFT branch as an extra
                # residual term.
                # NOTE(review): soft_low_rank receives the pre-norm x while
                # layer.attn receives norm1(x) — confirm this asymmetry is
                # intentional.
                x = x + layer.attn(layer.norm1(x)) + soft_low_rank(x,layer.attn)
                x = layer.ffn(layer.norm2(x), identity=x)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            # Collect only the cls-token feature at the requested depths.
            if i in self.out_indices:
                outs.append(x[:, 0])

        return tuple(outs)