import torch
import torch.nn as nn
from medfmc.models.deit3 import DeiT3
from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer
from mmcls.models.backbones.vision_transformer import TransformerEncoderLayer
from mmcls.models.utils import resize_pos_embed
from typing import List
from mmcv.runner.base_module import BaseModule, ModuleList

from mmcls.models.utils.attention import MultiheadAttention
import math

def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> "tuple[torch.Tensor, torch.Tensor]":
    """Pure-PyTorch scaled dot-product attention that also returns the
    attention weights (``F.scaled_dot_product_attention`` does not).

    Args:
        query: ``(..., L, E)`` query tensor.
        key: ``(..., S, E)`` key tensor.
        value: ``(..., S, Ev)`` value tensor.
        attn_mask: optional mask broadcastable to ``(L, S)``; bool masks keep
            ``True`` positions, float masks are added to the logits.
        dropout_p: dropout probability applied to the attention weights.
        is_causal: apply a lower-triangular causal mask; mutually exclusive
            with ``attn_mask``.
        scale: optional override for the default ``1/sqrt(E)`` scaling.

    Returns:
        ``(output, attn_weight)`` — the attention output and the softmaxed
        (post-dropout) attention weights.
    """
    L, S = query.size(-2), key.size(-2)
    scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
    # Build the additive bias on the query's device/dtype up front, so the
    # in-place fills below cannot hit a CPU/CUDA device mismatch. (The
    # original created it on CPU and called `attn_bias.to(query.dtype)`
    # without assigning the result — a no-op.)
    attn_bias = torch.zeros(L, S, dtype=query.dtype, device=query.device)
    if is_causal:
        assert attn_mask is None
        temp_mask = torch.ones(
            L, S, dtype=torch.bool, device=query.device).tril(diagonal=0)
        attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
        else:
            attn_bias += attn_mask
    attn_weight = query @ key.transpose(-2, -1) * scale_factor
    attn_weight += attn_bias
    attn_weight = torch.softmax(attn_weight, dim=-1)
    # Callers pass dropout_p=0. at eval time, so train=True is then a no-op.
    attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
    return attn_weight @ value, attn_weight

def attention_output(cls, x):
    """Replacement ``forward`` for an attention module that also returns the
    softmaxed attention weights.

    Bound onto ``MultiheadAttention`` instances via ``__get__`` (``cls`` is
    the attention module itself), so the signature matches the original
    ``forward``.
    """
    batch, tokens = x.shape[0], x.shape[1]
    # Project to q/k/v and split heads: (3, B, num_heads, N, head_dims).
    projected = cls.qkv(x).reshape(
        batch, tokens, 3, cls.num_heads, cls.head_dims)
    q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)

    # Apply attention dropout only in training mode.
    # NOTE(review): this passes cls.attn_drop straight through as a dropout
    # probability — presumably a float here; confirm against the attention
    # module's definition.
    drop_p = cls.attn_drop if cls.training else 0.
    out, qk_out = scaled_dot_product_attention(q, k, v, dropout_p=drop_p)
    out = out.transpose(1, 2).reshape(batch, tokens, cls.embed_dims)

    out = cls.proj(out)
    out = cls.out_drop(cls.gamma1(cls.proj_drop(out)))

    if cls.v_shortcut:
        out = v.squeeze(1) + out
    return out, qk_out

def layer_output(cls, x):
    """Replacement ``forward`` for ``TransformerEncoderLayer`` that threads
    the attention weights out alongside the layer's features.

    Bound onto layer instances via ``__get__`` (``cls`` is the layer).
    Returns ``(features, attn_weights)``.
    """
    attn_out, qk_out = cls.attn(cls.ln1(x))
    residual = x + attn_out
    return cls.ffn(cls.ln2(residual), identity=residual), qk_out


@BACKBONES.register_module()
class FullVIT(DeiT3):
    """DeiT3 backbone that can expose per-layer attention maps and freeze
    its leading transformer stages.

    Args:
        frozen_stages (int): number of leading encoder layers to freeze;
            0 (default) disables freezing entirely.
        output_attentions (bool): if True, rebind each encoder layer's (and
            its attention module's) ``forward`` so the backbone also returns
            the softmaxed attention weights of every layer.
    """

    def __init__(self,
                 frozen_stages: int = 0,
                 output_attentions: bool = False,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.output_attentions = output_attentions
        if self.output_attentions:
            # Patch the bound forwards so each layer returns
            # (features, attn_weights) instead of just features.
            for layer in self.layers:
                layer.forward = layer_output.__get__(
                    layer, TransformerEncoderLayer)
                layer.attn.forward = attention_output.__get__(
                    layer.attn, MultiheadAttention)
        # Freeze stages only when frozen_stages > 0.
        self.frozen_stages = frozen_stages
        if self.frozen_stages > 0:
            self._freeze_stages()

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` encoder layers.

        Frozen modules are put in eval mode and their parameters get
        ``requires_grad = False``. When every layer is frozen, the trailing
        norm layers are frozen as well.
        """
        # Freeze position embedding.
        if self.pos_embed is not None:
            self.pos_embed.requires_grad = False
        # Keep positional dropout in eval mode.
        self.drop_after_pos.eval()
        # Freeze patch embedding.
        self.patch_embed.eval()
        for param in self.patch_embed.parameters():
            param.requires_grad = False
        # Freeze pre-norm.
        for param in self.pre_norm.parameters():
            param.requires_grad = False
        # Freeze cls_token.
        if self.cls_token is not None:
            self.cls_token.requires_grad = False
        # Freeze the first `frozen_stages` encoder layers.
        for i in range(self.frozen_stages):
            layer = self.layers[i]
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False
        # When the whole encoder is frozen, also freeze the final norms.
        if self.frozen_stages == len(self.layers):
            if self.final_norm:
                self.ln1.eval()
                for param in self.ln1.parameters():
                    param.requires_grad = False

            if self.out_type == 'avg_featmap':
                self.ln2.eval()
                for param in self.ln2.parameters():
                    param.requires_grad = False

    def forward(self, x):
        """Forward pass following the mmcls ViT implementation.

        Args:
            x (torch.Tensor): input image batch.

        Returns:
            ``tuple(outs)`` of per-stage outputs, or
            ``(tuple(outs), attentions)`` when ``output_attentions`` is set,
            where ``attentions`` lists every layer's attention weights.
        """
        B = x.shape[0]
        x, patch_resolution = self.patch_embed(x)

        # Prepend the class token, then add (resized) position embeddings.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        if not self.with_cls_token:
            # Remove class token for transformer encoder input.
            x = x[:, 1:]

        outs = []
        attentions = []
        for i, layer in enumerate(self.layers):
            if self.output_attentions:
                # Patched layers return (features, attn_weights).
                x, attention = layer(x)
                attentions.append(attention)
            else:
                x = layer(x)

            if i == len(self.layers) - 1 and self.final_norm:
                x = self.norm1(x)

            if i in self.out_indices:
                outs.append(self._format_output(x, patch_resolution))

        if self.output_attentions:
            return tuple(outs), attentions
        return tuple(outs)
