
from medfmc.models.deit3 import DeiT3, DeiT3TransformerEncoderLayer
from mmcls.models import BACKBONES
import torch
import math
from medfmc.models.utils.attention import MultiheadAttention
from medfmc.models.utils.embed import resize_pos_embed

def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> "tuple[torch.Tensor, torch.Tensor]":
    """Compute softmax attention and ALSO return the attention weights.

    Mirrors ``torch.nn.functional.scaled_dot_product_attention`` but exposes
    the post-softmax weight matrix, which the fused torch op does not.

    Args:
        query, key, value: tensors of shape ``(..., seq, head_dim)``.
        attn_mask: optional additive mask, or a bool mask where ``False``
            positions are masked out. Mutually exclusive with ``is_causal``.
        dropout_p: dropout probability applied to the attention weights.
            Dropout is always in "train" mode here; callers disable it by
            passing ``0.`` at eval time (see ``attention_output``).
        is_causal: apply a lower-triangular (causal) mask.
        scale: optional override for the ``1/sqrt(head_dim)`` scaling.

    Returns:
        Tuple of ``(attention output, attention weights)``.
    """
    L, S = query.size(-2), key.size(-2)
    scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
    # Build the bias directly on query's device/dtype. The original allocated
    # it on the CPU, which broke masked_fill_/+= against a CUDA attn_mask and
    # forced a late device transfer before use.
    attn_bias = torch.zeros(L, S, dtype=query.dtype, device=query.device)
    if is_causal:
        assert attn_mask is None
        temp_mask = torch.ones(L, S, dtype=torch.bool, device=query.device).tril(diagonal=0)
        attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            # False entries in a bool mask mean "do not attend".
            attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf"))
        else:
            attn_bias += attn_mask
    attn_weight = query @ key.transpose(-2, -1) * scale_factor
    attn_weight += attn_bias
    attn_weight = torch.softmax(attn_weight, dim=-1)
    attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
    return attn_weight @ value, attn_weight

def attention_output(cls, x):
    """Replacement ``forward`` for the attention module that also returns
    the post-softmax attention weights.

    Bound onto ``MultiheadAttention`` instances via ``__get__`` (``cls`` is
    the bound instance). Returns ``(output, attention_weights)`` instead of
    just the output tensor.
    """
    batch, tokens, _ = x.shape
    # One fused projection, then split into (3, B, heads, N, head_dims).
    qkv = cls.qkv(x).reshape(batch, tokens, 3, cls.num_heads, cls.head_dims)
    qkv = qkv.permute(2, 0, 3, 1, 4)
    query, key, value = qkv.unbind(0)

    # Disable attention dropout at eval time by zeroing the probability,
    # since scaled_dot_product_attention always applies dropout in train mode.
    # NOTE(review): assumes cls.attn_drop is a float probability — confirm
    # against the project's MultiheadAttention definition.
    drop_p = cls.attn_drop if cls.training else 0.
    out, qk_out = scaled_dot_product_attention(query, key, value, dropout_p=drop_p)
    out = out.transpose(1, 2).reshape(batch, tokens, cls.embed_dims)

    out = cls.proj(out)
    out = cls.out_drop(cls.gamma1(cls.proj_drop(out)))

    if cls.v_shortcut:
        out = value.squeeze(1) + out
    return out, qk_out

def layer_output(cls, x):
    """Replacement ``forward`` for an encoder layer that threads the
    attention weights through alongside the layer output.

    Bound onto ``DeiT3TransformerEncoderLayer`` instances via ``__get__``;
    expects ``cls.attn`` to have been patched to return
    ``(output, attention_weights)``. Returns ``(layer_output, weights)``.
    """
    attn_out, qk_out = cls.attn(cls.ln1(x))
    residual = x + attn_out
    return cls.ffn(cls.ln2(residual), identity=residual), qk_out

@BACKBONES.register_module()
class FullClip(DeiT3):
    """DeiT3 backbone that can optionally expose per-layer attention maps.

    When ``output_attentions`` is True, each encoder layer's ``forward`` (and
    its attention module's ``forward``) is rebound to a variant that also
    returns the softmaxed attention weights, and ``forward`` returns
    ``(outs, attentions)`` instead of just ``outs``.
    """

    def __init__(self,
                 output_attentions=False,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.output_attentions = output_attentions
        if self.output_attentions:
            # Monkey-patch every layer so its forward pass also yields the
            # attention weight matrix.
            for layer in self.layers:
                layer.forward = layer_output.__get__(layer, DeiT3TransformerEncoderLayer)
                layer.attn.forward = attention_output.__get__(layer.attn, MultiheadAttention)


    def forward(self, x):
        batch = x.shape[0]
        x, patch_resolution = self.patch_embed(x)

        # Prepend the class token when the backbone uses one.
        if self.cls_token is not None:
            x = torch.cat((self.cls_token.expand(batch, -1, -1), x), dim=1)

        # Interpolate the position embedding to the actual patch grid.
        pos = resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x + pos)

        x = self.pre_norm(x)

        outs, attentions = [], []
        last = len(self.layers) - 1
        for i, layer in enumerate(self.layers):
            if self.output_attentions:
                x, attn_map = layer(x)
                attentions.append(attn_map)
            else:
                x = layer(x)

            # Final layer norm applies only after the last encoder layer.
            if i == last and self.final_norm:
                x = self.ln1(x)

            if i in self.out_indices:
                outs.append(self._format_output(x, patch_resolution))

        if self.output_attentions:
            return tuple(outs), attentions
        return tuple(outs)
