import torch
import torch.nn as nn

from mmcls.models import BACKBONES
from mmcls.models.backbones import VisionTransformer

from mmcls.models.utils import resize_pos_embed
from typing import List
from mmcv.runner.base_module import BaseModule, ModuleList
import torch.nn.functional as F

class Gate(nn.Module):
    """Element-wise gating: scale the input by a learned sigmoid scalar.

    A single linear layer maps each token vector to one logit; the
    sigmoid of that logit multiplicatively gates the whole vector.
    """

    def __init__(self, input_dims: int):
        super().__init__()
        # One scalar gate logit per token.
        self.fc = nn.Linear(input_dims, 1)

    def forward(self, inputs):
        """Return ``sigmoid(fc(inputs)) * inputs`` (broadcast over the last dim)."""
        gate_value = self.fc(inputs).sigmoid()
        return inputs * gate_value




class Adapter(nn.Module):
    """Bottleneck adapter: down-project, GELU, up-project.

    Args:
        embed_dims (int): dimensionality of the incoming features.
        adapter_dim (int): width of the bottleneck. Defaults to 20.
    """

    def __init__(self, embed_dims, adapter_dim=20):
        super().__init__()
        self.down_sample = nn.Linear(embed_dims, adapter_dim)
        self.activation = nn.GELU()
        self.up_sample = nn.Linear(adapter_dim, embed_dims)

    def forward(self, x):
        """Project ``x`` through the bottleneck and back to ``embed_dims``."""
        hidden = self.activation(self.down_sample(x))
        return self.up_sample(hidden)



@BACKBONES.register_module()
class AdaptVPT(VisionTransformer):
    """Vision Transformer backbone with gated bottleneck adapters.

    For each layer index in ``adapter_layers`` an :class:`Adapter` and a
    :class:`Gate` are registered as sub-modules; during ``forward`` the
    gated adapter output is added to that layer's FFN output.

    Args:
        adapter_dim (int): bottleneck width of each adapter. Defaults to 20.
        adapter_init (str): weight init scheme for the adapter linear
            layers, ``'uniform'`` or ``'normal'``. Any other value leaves
            the ``nn.Linear`` default init. Defaults to ``'normal'``.
        adapter_layers (List[int]): indices of transformer layers that get
            an adapter. ``None`` means every layer. Defaults to None.
    """

    def __init__(self,
                 adapter_dim: int = 20,
                 adapter_init: str = 'normal',
                 adapter_layers: List[int] = None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Compare against None (not truthiness) so that an explicit empty
        # list disables all adapters instead of silently enabling them on
        # every layer.
        if adapter_layers is not None:
            self.adapter_layers = adapter_layers
        else:
            self.adapter_layers = list(range(len(self.layers)))

        for layer in self.adapter_layers:
            adapter = Adapter(self.embed_dims, adapter_dim)
            if adapter_init == 'uniform':
                nn.init.uniform_(adapter.down_sample.weight, -0.1, 0.1)
                nn.init.uniform_(adapter.up_sample.weight, -0.1, 0.1)
            elif adapter_init == 'normal':
                nn.init.normal_(adapter.down_sample.weight, std=0.02)
                nn.init.normal_(adapter.up_sample.weight, std=0.02)
            self.add_module(f'adapter_{layer}', adapter)
            # BUG FIX: register the gate here, once per adapter layer.
            # Previously a fresh Gate was constructed inside forward() on
            # every call, so its weights were re-randomized each pass,
            # never registered on the module, and never trained.
            self.add_module(f'gate_{layer}', Gate(self.embed_dims))

    def forward(self, x):
        """Following mmcls implementation.

        Returns:
            tuple: the class-token features for each index in
            ``self.out_indices``.
        """
        B = x.shape[0]
        x, patch_resolution = self.patch_embed(x)

        # stole cls_tokens impl from Phil Wang, thanks
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # Interpolate the pretrained position embedding to the current
        # patch grid if the input resolution differs.
        x = x + resize_pos_embed(
            self.pos_embed,
            self.patch_resolution,
            patch_resolution,
            mode=self.interpolate_mode,
            num_extra_tokens=self.num_extra_tokens)
        x = self.drop_after_pos(x)

        if not self.with_cls_token:
            # Remove class token for transformer encoder input
            x = x[:, 1:]

        outs = []
        for i, layer in enumerate(self.layers):
            # Self-attention with pre-norm and residual.
            attn_x = x + layer.attn(layer.norm1(x))
            ffn_x = layer.ffn(layer.norm2(attn_x), identity=attn_x)
            # Add the gated adapter branch (computed from the attention
            # output) on top of the FFN output.
            if i in self.adapter_layers:
                ada_x = getattr(self, f'adapter_{i}')(attn_x)
                ada_x = getattr(self, f'gate_{i}')(ada_x)
                ffn_x = ada_x + ffn_x
            x = ffn_x

            if i == len(self.layers) - 1 and self.final_norm:
                # `norm1` is the final LayerNorm in mmcls' ViT naming.
                x = self.norm1(x)

            if i in self.out_indices:
                # Class token only, per-stage output.
                outs.append(x[:, 0])

        return tuple(outs)


