import torch
import torch.nn as nn

from mmcls.models.utils.attention import MultiheadAttention
from .lora_layers import Linear

from functools import partial

from mmcls.models.backbones.vision_transformer import TransformerEncoderLayer, VisionTransformer
from mmcls.models import BACKBONES

from mmcls.models.utils import resize_pos_embed



        
@BACKBONES.register_module()
class LoRAVisionTransformer(VisionTransformer):
    """Vision Transformer with LoRA adapters on selected attention layers.

    The pretrained backbone is fully frozen; low-rank (r=4) query/value
    adapter projections are added afterwards and therefore remain the only
    trainable parameters.

    Args:
        prompt_length (int): Accepted for config compatibility; currently
            unused by this backbone.
            # NOTE(review): looks like prompt tuning was planned but never
            # wired in — confirm against the training config.
        *args, **kwargs: Forwarded unchanged to ``VisionTransformer``.
    """

    def __init__(self,
                 prompt_length,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Freeze every pretrained parameter; the LoRA modules created
        # below are new and stay trainable by default.
        for param in self.parameters():
            param.requires_grad = False

        # Kept only so the constructor argument is not silently dropped.
        self.prompt_length = prompt_length

        head_dims = self.layers[0].attn.head_dims
        # Indices (into self.layers) of the blocks that get LoRA adapters.
        self.lora_layers = [0]
        # BUGFIX: register each adapter under the *layer index* so it
        # matches the ``getattr(self, f'lora_query_{i}')`` lookup in
        # ``forward``.  The original keyed modules by ``enumerate``
        # position, which only worked for ``lora_layers == [0]``
        # (e.g. ``[2, 5]`` created lora_query_0/1 but forward looked up
        # lora_query_2/5 -> AttributeError).
        for layer_idx in self.lora_layers:
            self.add_module(f'lora_query_{layer_idx}',
                            Linear(head_dims, head_dims, r=4, bias=False))
            self.add_module(f'lora_value_{layer_idx}',
                            Linear(head_dims, head_dims, r=4, bias=False))

    @staticmethod
    def _lora_attention(x, attn_layer, lora_query, lora_value):
        """Multi-head self-attention with LoRA deltas added to q and v.

        Mirrors the mmcls ``MultiheadAttention`` forward pass, except
        that low-rank updates ``lora_query(q)`` / ``lora_value(v)`` are
        added to the query and value tensors (per head, on the last
        feature dim) before attention is computed.

        Args:
            x (Tensor): Input of shape (B, N, embed_dims), already normed.
            attn_layer: The frozen ``MultiheadAttention`` module whose
                weights are reused for qkv/proj.
            lora_query, lora_value: Trainable low-rank Linear adapters
                mapping head_dims -> head_dims.

        Returns:
            Tensor of shape (B, N, embed_dims).
        """
        B, N, _ = x.shape
        qkv = attn_layer.qkv(x)
        # (B, N, 3*embed_dims) -> (3, B, num_heads, N, head_dims)
        qkv = qkv.reshape(B, N, 3, attn_layer.num_heads,
                          attn_layer.head_dims).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        # Additive low-rank corrections; deltas are computed from the
        # frozen projections' outputs, then added residually.
        q = q.contiguous() + lora_query(q)
        v = v.contiguous() + lora_value(v)

        attn = (q @ k.transpose(-2, -1)) * attn_layer.scale
        attn = attn.softmax(dim=-1)
        attn = attn_layer.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, attn_layer.embed_dims)
        x = attn_layer.proj(x)
        x = attn_layer.out_drop(attn_layer.proj_drop(x))

        if attn_layer.v_shortcut:
            x = v.squeeze(1) + x
        return x

    def forward(self, x):
        """Forward pass following the mmcls ViT implementation.

        Layers whose index appears in ``self.lora_layers`` are routed
        through :meth:`_lora_attention`; all others run unmodified.

        Returns:
            tuple[Tensor]: cls-token features for each index in
            ``self.out_indices``.
        """
        B = x.shape[0]
        x, patch_resolution = self.patch_embed(x)

        # Prepend the class token, then add (resized) position embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        if not self.with_cls_token:
            # Remove class token for transformer encoder input.
            x = x[:, 1:]

        outs = []
        for i, layer in enumerate(self.layers):
            if i in self.lora_layers:
                lora_query = getattr(self, f'lora_query_{i}')
                lora_value = getattr(self, f'lora_value_{i}')
                # Pre-norm residual attention with LoRA, then the
                # layer's own FFN with its residual identity.
                attn_x = x + self._lora_attention(
                    layer.norm1(x), layer.attn, lora_query, lora_value)
                x = layer.ffn(layer.norm2(attn_x), identity=attn_x)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            if i in self.out_indices:
                outs.append(x[:, 0])

        return tuple(outs)
    
