
import torch
import torch.nn as nn

from mmcls.models import BACKBONES
from mmcv.runner.base_module import BaseModule, ModuleList
from medfmc.models.prompt_swin import PromptedPatchMerging, PromptedShiftWindowMSA, PromptedSwinBlock, PromptedSwinBlockSequence, PromptedSwinTransformer, PromptedWindowMSA
from mmcls.models.utils.attention import WindowMSA
from mmcls.models.utils import resize_pos_embed, to_2tuple
from mmcv.cnn.bricks.transformer import (AdaptivePadding, PatchEmbed,
                                         PatchMerging)


from typing import List, Sequence
from copy import deepcopy




class Gate(nn.Module):
    """Learned scalar gate: scales each token by sigmoid(Linear(x)).

    Args:
        input_dims (int): Feature dimension of the incoming tokens.
    """

    def __init__(self, input_dims: int):
        super().__init__()
        # Single-output linear layer produces one gate value per token.
        self.fc = nn.Linear(input_dims, 1)

    def forward(self, x):
        """Return ``x`` scaled elementwise by a gate in (0, 1)."""
        gate_value = torch.sigmoid(self.fc(x))
        return gate_value * x

class CAVPT(nn.Module):
    """Prompt-to-image cross attention without learned projections.

    Splits the token sequence into ``prompt_length`` leading prompt tokens and
    the remaining image tokens, lets each prompt attend over the image tokens
    with scaled dot-product attention, and returns the attended prompts
    concatenated back in front of the untouched image tokens.

    Args:
        prompt_length (int): Number of leading prompt tokens in the sequence.
        adapter_dim (int): Feature dimension used for attention scaling.
    """

    def __init__(self, prompt_length, adapter_dim):
        super().__init__()
        self.prompt_length = prompt_length
        self.hidden_dim = adapter_dim
        # Kept for interface parity; attention below is single-head.
        self.num_heads = 1

    def forward(self, x):
        """Apply prompt-to-image attention.

        Args:
            x (Tensor): Shape ``(B, prompt_length + N, C)``.

        Returns:
            Tensor: Same shape as ``x``; prompts replaced by their attended
            versions, image tokens passed through unchanged.
        """
        prompt_features = x[:, :self.prompt_length, :]
        image_features = x[:, self.prompt_length:, :]

        # Scaled dot-product attention: prompts are queries, image tokens
        # serve as both keys and values.
        prompt_scores = torch.matmul(
            prompt_features,
            image_features.transpose(-2, -1)) / (self.hidden_dim ** 0.5)
        # Fix: the original called F.softmax, but `torch.nn.functional` was
        # never imported as F, raising NameError at runtime.
        prompt_scores = torch.softmax(prompt_scores, dim=-1)
        prompt_attention = torch.matmul(prompt_scores, image_features)
        return torch.cat((prompt_attention, image_features), dim=1)

    
class MLPAdapter(nn.Module):
    """Linear projection in parallel with a gated bottleneck adapter.

    Output is ``proj(x) + gate(up(relu(down(x))))``: a full-rank linear
    branch plus a low-rank (``intrinsic_dim``) adapter branch whose output is
    scaled by a learned sigmoid gate.

    Args:
        embed_dims (int): Input/output feature dimension.
        intrinsic_dim (int): Bottleneck width of the adapter branch.
            Defaults to 20.
    """

    def __init__(self, embed_dims, intrinsic_dim=20):
        super().__init__()
        # Full-rank projection branch.
        self.proj = nn.Linear(embed_dims, embed_dims)
        # Low-rank adapter branch: down-project, nonlinearity, up-project.
        self.down_proj = nn.Linear(embed_dims, intrinsic_dim)
        self.non_linear_func = nn.ReLU()
        self.up_proj = nn.Linear(intrinsic_dim, embed_dims)
        # Fix: build the gate once here so its parameters are registered,
        # saved, and trained. The original constructed a fresh randomly
        # initialized Gate on every forward pass, making the gate untrainable
        # and the output nondeterministic across constructions.
        self.gate = Gate(embed_dims)

    def forward(self, x):
        """Return the sum of the projection branch and the gated adapter."""
        proj_x = self.proj(x)

        down = self.down_proj(x)
        down = self.non_linear_func(down)
        up = self.up_proj(down)
        up = self.gate(up)

        return up + proj_x
    
class DVPTWindowMSA(PromptedWindowMSA):
    """Prompted window MSA with its output projection replaced by MLPAdapter.

    Identical to :class:`PromptedWindowMSA` except that ``self.proj`` — the
    layer applied to the attention output — becomes a gated MLP adapter
    instead of a plain linear layer. Note this discards whatever ``proj``
    the parent constructor built (including any pretrained weights).
    """
    def __init__(self, *args,**kwargs):
        super().__init__(*args,**kwargs)
        # NOTE(review): assumes the parent exposes `self.embed_dims` and
        # applies `self.proj` to the attention output — TODO confirm against
        # medfmc.models.prompt_swin.PromptedWindowMSA.
        self.proj = MLPAdapter(self.embed_dims)


class DVPTShiftWindowMSA(PromptedShiftWindowMSA):
    """Prompted (shifted-)window MSA whose inner attention is DVPTWindowMSA.

    The parent constructor builds its own ``w_msa``; when ``prompt_pos`` is
    ``'prepend'`` it is replaced below with a :class:`DVPTWindowMSA`, whose
    output projection is an :class:`MLPAdapter`. For any other ``prompt_pos``
    the parent's ``w_msa`` is kept unchanged.
    """
    def __init__(self,
                 embed_dims,
                 num_heads,
                 window_size,
                 shift_size=0,
                 qkv_bias=True,
                 qk_scale=None,
                 attn_drop=0,
                 proj_drop=0,
                 dropout_layer=dict(type='DropPath', drop_prob=0.),
                 pad_small_map=False,
                 input_resolution=None,
                 auto_pad=None,
                 window_msa=WindowMSA,
                 msa_cfg=dict(),
                 init_cfg=None,
                 prompt_length=1,
                 prompt_pos='prepend'):
        # Prompt arguments are intentionally NOT forwarded to the parent;
        # they only shape the replacement w_msa built below.
        super().__init__(
            embed_dims=embed_dims,
            num_heads=num_heads,
            window_size=window_size,
            shift_size=shift_size,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
            dropout_layer=dropout_layer,
            pad_small_map=pad_small_map,
            input_resolution=input_resolution,
            auto_pad=auto_pad,
            window_msa=window_msa,
            msa_cfg=msa_cfg,
            init_cfg=init_cfg)
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos
        if self.prompt_pos == 'prepend':
            # NOTE(review): prompt_length / prompt_pos are passed
            # positionally; assumes they are the first two parameters of
            # PromptedWindowMSA.__init__ — TODO confirm against
            # medfmc.models.prompt_swin.
            self.w_msa = DVPTWindowMSA(
                prompt_length,
                prompt_pos,
                embed_dims=embed_dims,
                window_size=to_2tuple(window_size),
                num_heads=num_heads,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                attn_drop=attn_drop,
                proj_drop=proj_drop,
                **msa_cfg,
            )


class DVPTSwinBlock(PromptedSwinBlock):
    """Prompted Swin block whose attention is a :class:`DVPTShiftWindowMSA`.

    The parent constructor builds a regular prompted attention module; it is
    replaced here so that the window attention carries the DVPT MLP-adapter
    projection. The external interface matches :class:`PromptedSwinBlock`
    plus the prompt arguments.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 window_size=7,
                 shift=False,
                 ffn_ratio=4.,
                 drop_path=0.,
                 pad_small_map=False,
                 attn_cfgs=dict(),
                 ffn_cfgs=dict(),
                 norm_cfg=dict(type='LN'),
                 with_cp=False,
                 init_cfg=None,
                 prompt_length=1,
                 prompt_pos='prepend'):
        super().__init__(
            embed_dims=embed_dims,
            num_heads=num_heads,
            window_size=window_size,
            shift=shift,
            ffn_ratio=ffn_ratio,
            drop_path=drop_path,
            pad_small_map=pad_small_map,
            attn_cfgs=attn_cfgs,
            ffn_cfgs=ffn_cfgs,
            norm_cfg=norm_cfg,
            with_cp=with_cp,
            init_cfg=init_cfg,
        )
        # Rebuild the attention as a DVPT shifted-window MSA. Entries in the
        # user-supplied `attn_cfgs` override the defaults assembled here.
        attn_cfg = dict(
            embed_dims=embed_dims,
            num_heads=num_heads,
            shift_size=window_size // 2 if shift else 0,
            window_size=window_size,
            dropout_layer=dict(type='DropPath', drop_prob=drop_path),
            pad_small_map=pad_small_map,
            prompt_length=prompt_length,
            prompt_pos=prompt_pos,
        )
        attn_cfg.update(attn_cfgs)
        self.attn = DVPTShiftWindowMSA(**attn_cfg)



class DVPTBlockSequence(PromptedSwinBlockSequence):
    """One Swin stage: a stack of DVPTSwinBlocks plus optional downsampling.

    Mirrors :class:`PromptedSwinBlockSequence`, but rebuilds the blocks as
    :class:`DVPTSwinBlock` instances (so every block carries the MLP-adapter
    projection) and, when requested, a :class:`PromptedPatchMerging`
    downsampling layer.
    """

    def __init__(self,
                 embed_dims,
                 depth,
                 num_heads,
                 window_size=7,
                 downsample=False,
                 downsample_cfg=dict(),
                 drop_paths=0.,
                 block_cfgs=dict(),
                 with_cp=False,
                 pad_small_map=False,
                 init_cfg=None,
                 prompt_length=1,
                 prompt_pos='prepend'):
        super().__init__(
            embed_dims=embed_dims,
            depth=depth,
            num_heads=num_heads,
            window_size=window_size,
            downsample=downsample,
            downsample_cfg=downsample_cfg,
            drop_paths=drop_paths,
            block_cfgs=block_cfgs,
            with_cp=with_cp,
            pad_small_map=pad_small_map,
            init_cfg=init_cfg)

        # Broadcast a scalar drop rate / single config dict to one per block.
        if not isinstance(drop_paths, Sequence):
            drop_paths = [drop_paths] * depth
        if not isinstance(block_cfgs, Sequence):
            block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)]

        self.embed_dims = embed_dims
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos

        # Replace the parent's blocks with DVPT-aware ones; every odd block
        # uses a shifted window. Per-block `block_cfgs` entries override the
        # defaults assembled here.
        self.blocks = ModuleList()
        for idx in range(depth):
            block_cfg = dict(
                embed_dims=embed_dims,
                num_heads=num_heads,
                window_size=window_size,
                shift=idx % 2 == 1,
                drop_path=drop_paths[idx],
                with_cp=with_cp,
                pad_small_map=pad_small_map,
                prompt_length=prompt_length,
                prompt_pos=prompt_pos,
            )
            block_cfg.update(block_cfgs[idx])
            self.blocks.append(DVPTSwinBlock(**block_cfg))

        if downsample:
            merge_cfg = dict(
                in_channels=embed_dims,
                out_channels=2 * embed_dims,
                norm_cfg=dict(type='LN'),
                prompt_length=prompt_length,
                prompt_pos=prompt_pos,
            )
            merge_cfg.update(downsample_cfg)
            self.downsample = PromptedPatchMerging(**merge_cfg)
        else:
            self.downsample = None






@BACKBONES.register_module()
class DVPTSwinTransformer(PromptedSwinTransformer):
    """Swin Transformer backbone for Dynamic Visual Prompt Tuning (DVPT).

    Rebuilds the parent's stages with :class:`DVPTBlockSequence` so every
    attention block carries an MLP-adapter output projection, freezes all
    backbone parameters, and adds learnable prompt tokens — the prompt is
    the only trainable parameter after construction.

    Args:
        arch (str): Swin architecture preset forwarded to the parent.
        img_size (int): Input image size for the patch embedding.
        patch_size (int): Patch embedding kernel size and stride.
        in_channels (int): Number of input image channels.
        window_size (int): Attention window size.
        drop_path_rate (float): Maximum stochastic-depth rate, decayed
            linearly over all blocks.
        with_cp (bool): Use gradient checkpointing inside blocks.
        pad_small_map (bool): Pad feature maps smaller than the window.
        stage_cfgs (dict | Sequence[dict]): Per-stage config overrides.
        patch_cfg (dict): Overrides for the patch-embedding config.
        prompt_length (int): Number of prompt tokens per prompted layer.
        prompt_layers (list[int] | None): Indices of prompted layers;
            ``None`` means ``[0]``.
        prompt_pos (str): Prompt insertion position ('prepend').
        prompt_init (str): Prompt initialisation: 'uniform', 'zero',
            'kaiming', 'token', or (default) normal with std 0.02.
    """

    def __init__(
        self,
        arch='base',
        img_size=224,
        patch_size=4,
        in_channels=3,
        window_size=7,
        drop_path_rate=0.1,
        with_cp=False,
        pad_small_map=False,
        stage_cfgs=dict(),
        patch_cfg=dict(),
        prompt_length=1,
        prompt_layers=None,
        prompt_pos='prepend',
        prompt_init='normal',
    ):
        # NOTE(review): only `arch` is forwarded to the parent; the patch
        # embedding and stages the parent builds are overwritten below.
        super().__init__(arch=arch)
        self.prompt_length = prompt_length
        self.prompt_pos = prompt_pos
        self.avgpool = nn.AdaptiveAvgPool1d(1)

        # Stochastic depth decay rule: one linearly increasing drop-path
        # rate per block across all stages.
        total_depth = sum(self.depths)
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
        ]

        # Fix: the patch embedding depends on no per-stage value, so build it
        # once here. The original rebuilt an identical PatchEmbed on every
        # stage iteration, discarding all but the last one.
        _patch_cfg = dict(
            in_channels=in_channels,
            input_size=img_size,
            embed_dims=self.embed_dims,
            conv_type='Conv2d',
            kernel_size=patch_size,
            stride=patch_size,
            norm_cfg=dict(type='LN'),
        )
        _patch_cfg.update(patch_cfg)
        self.patch_embed = PatchEmbed(**_patch_cfg)

        # Rebuild the stages with DVPT block sequences.
        self.stages = ModuleList()
        embed_dims = [self.embed_dims]
        for i, (depth,
                num_heads) in enumerate(zip(self.depths, self.num_heads)):
            if isinstance(stage_cfgs, Sequence):
                stage_cfg = stage_cfgs[i]
            else:
                stage_cfg = deepcopy(stage_cfgs)
            # Every stage but the last downsamples its output.
            downsample = True if i < self.num_layers - 1 else False
            _stage_cfg = {
                'embed_dims': embed_dims[-1],
                'depth': depth,
                'num_heads': num_heads,
                'window_size': window_size,
                'downsample': downsample,
                'drop_paths': dpr[:depth],
                'with_cp': with_cp,
                'pad_small_map': pad_small_map,
                'prompt_length': prompt_length,
                'prompt_pos': prompt_pos,
                **stage_cfg
            }
            stage = DVPTBlockSequence(**_stage_cfg)
            self.stages.append(stage)

            # Consume this stage's drop-path rates; track the output width.
            dpr = dpr[depth:]
            embed_dims.append(stage.out_channels)

        # Freeze the whole backbone; only parameters created after this
        # point (the prompt below) remain trainable.
        for param in self.parameters():
            param.requires_grad = False

        # Learnable prompt tokens: one bank of shape
        # (len(prompt_layers), prompt_length, embed_dims).
        self.prompt_layers = [0] if prompt_layers is None else prompt_layers
        prompt = torch.empty(
            len(self.prompt_layers), prompt_length, self.embed_dims)
        if prompt_init == 'uniform':
            nn.init.uniform_(prompt, -0.08, 0.08)
        elif prompt_init == 'zero':
            nn.init.zeros_(prompt)
        elif prompt_init == 'kaiming':
            nn.init.kaiming_normal_(prompt)
        elif prompt_init == 'token':
            nn.init.zeros_(prompt)
            # 'token' defers real initialisation to the first forward pass.
            self.prompt_initialized = False
        else:
            nn.init.normal_(prompt, std=0.02)
        self.prompt = nn.Parameter(prompt, requires_grad=True)