import torch.nn as nn
# from backbone import Resnet50
import torch
import torch.nn.functional as F


class HybridEncoder(nn.Module):
    """RT-DETR-style hybrid encoder.

    Projects three backbone feature maps to a common channel width, runs a
    transformer encoder layer on the smallest (highest-level) map, then fuses
    the levels with a top-down (FPN) pass followed by a bottom-up (PAN) pass.
    """

    def __init__(self, fea_size=(512, 1024, 2048), hidden_dim=256, enc_size=15):
        """
        Args:
            fea_size: channel counts of the three backbone levels, largest
                feature map first.
            hidden_dim: common channel width after projection.
                NOTE(review): the CSPLayer fusion blocks are fixed at
                512 -> 256 channels, so in practice hidden_dim must stay 256
                unless CSPLayer is parameterized as well.
            enc_size: spatial size of the highest-level map fed to the
                transformer encoder (640 input -> 20, 480 input -> 15).
        """
        super().__init__()
        self.convnorm = nn.ModuleList(
            [ConvNormLayer(in_channel=c, out_channel=hidden_dim) for c in fea_size])
        self.encoder = TransformerEncoder(size=enc_size, hidden_dim=hidden_dim)
        # 1x1 projections applied before each upsampling step of the FPN pass.
        self.upsample = nn.ModuleList([
            ConvNormLayer(in_channel=hidden_dim, out_channel=hidden_dim, s=1, k=1, act='silu'),
            ConvNormLayer(in_channel=hidden_dim, out_channel=hidden_dim, s=1, k=1, act='silu'),
        ])
        # Stride-2 convs for the bottom-up PAN pass.
        self.downsample = nn.ModuleList([
            ConvNormLayer(in_channel=hidden_dim, out_channel=hidden_dim, s=2, k=3, p=1, act='silu'),
            ConvNormLayer(in_channel=hidden_dim, out_channel=hidden_dim, s=2, k=3, p=1, act='silu'),
        ])
        # One fusion block per merge point: [0], [1] top-down; [2], [3] bottom-up.
        # BUG FIX: the original allocated three blocks and reused fusion[0] for
        # two different merges, silently sharing weights between FPN and PAN.
        self.fusion = nn.ModuleList([CSPLayer() for _ in range(4)])

    def forward(self, x):
        """Fuse backbone features.

        Args:
            x: list/tuple of three feature maps [C3, C4, C5], largest first.

        Returns:
            List of three fused maps at the same three resolutions.
        """
        fea_lvl1 = self.convnorm[0](x[0])
        fea_lvl2 = self.convnorm[1](x[1])
        fea_lvl3 = self.convnorm[2](x[2])

        # Top-down (FPN): encode the smallest map, then upsample and merge.
        fea_lvl3 = self.encoder(fea_lvl3)
        fea_lvl3 = self.upsample[0](fea_lvl3)
        lat_1 = F.interpolate(fea_lvl3, scale_factor=2., mode='nearest')
        fea_lvl2 = self.fusion[0](torch.concat((lat_1, fea_lvl2), dim=1))
        fea_lvl2 = self.upsample[1](fea_lvl2)
        lat_2 = F.interpolate(fea_lvl2, scale_factor=2., mode='nearest')
        fea_lvl1 = self.fusion[1](torch.concat((lat_2, fea_lvl1), dim=1))

        # Bottom-up (PAN): downsample and merge back up the pyramid.
        fea_lvl2 = self.fusion[2](torch.concat((self.downsample[0](fea_lvl1), fea_lvl2), dim=1))
        fea_lvl3 = self.fusion[3](torch.concat((self.downsample[1](fea_lvl2), fea_lvl3), dim=1))

        return [fea_lvl1, fea_lvl2, fea_lvl3]


class TransformerEncoder(nn.Module):
    """Single post-norm transformer encoder layer over a flattened feature map.

    Adds a fixed 2D sin-cos positional embedding to queries and keys, runs
    multi-head self-attention, then a GELU feed-forward block, each followed
    by a residual connection and LayerNorm.
    """

    def __init__(self, size, hidden_dim=256):
        """
        Args:
            size: spatial side length of the (square) input feature map,
                used to precompute the positional embedding (640:20, 480:15).
            hidden_dim: channel/embedding width; must be divisible by 4
                (sin-cos embedding) and by 8 (attention heads).
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        # BUG FIX: batch_first=True so inputs are (batch, seq, embed).
        # The original used the default batch_first=False while feeding
        # (B, H*W, C), which made attention mix the batch axis with the
        # sequence axis.
        self.attn = nn.MultiheadAttention(embed_dim=hidden_dim, num_heads=8, batch_first=True)
        self.norm1 = nn.LayerNorm(normalized_shape=hidden_dim)
        self.norm2 = nn.LayerNorm(normalized_shape=hidden_dim)
        self.linear1 = nn.Linear(hidden_dim, hidden_dim * 4)
        self.linear2 = nn.Linear(hidden_dim * 4, hidden_dim)
        self.act = nn.GELU()
        self._set_parameters(size)

    def _set_parameters(self, size):
        # Register as a (non-persistent) buffer so it follows .to(device)
        # without appearing in the state_dict, and build it with the actual
        # hidden_dim (the original always used the default 256).
        pos_embed = self.build_2d_sincos_position_embedding(
            w=size, h=size, embed_dim=self.hidden_dim)
        self.register_buffer('pos_embed', pos_embed, persistent=False)

    @staticmethod
    def build_2d_sincos_position_embedding(w=15, h=15, embed_dim=256, temperature=10000.):
        '''Build a fixed 2D sin-cos positional embedding of shape (1, w*h, embed_dim).'''
        grid_w = torch.arange(int(w), dtype=torch.float32)
        grid_h = torch.arange(int(h), dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij')
        assert embed_dim % 4 == 0, \
            'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1. / (temperature ** omega)

        out_w = grid_w.flatten()[..., None] @ omega[None]  # (h*w, pos_dim)
        out_h = grid_h.flatten()[..., None] @ omega[None]

        return torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None, :, :]

    @staticmethod
    def with_pos_embed(src, pos_embed=None):
        # Positional information is added only to queries/keys, not values.
        return src + pos_embed.to(src.device) if pos_embed is not None else src

    def forward(self, x):
        """x: (B, C, H, W) feature map; returns the same shape."""
        b, c, h, w = x.shape
        residual = x.flatten(2).permute(0, 2, 1)  # (B, H*W, C)
        q = k = self.with_pos_embed(residual, self.pos_embed)
        out, _ = self.attn(q, k, value=residual)
        residual = self.norm1(out + residual)
        out = self.linear2(self.act(self.linear1(residual)))
        out = self.norm2(residual + out)
        # Restore the spatial layout from the input shape (the original
        # hard-coded 256 x 15 x 15 here, breaking any other configuration).
        return out.permute(0, 2, 1).reshape(b, c, h, w).contiguous()


class ConvNormLayer(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> activation building block."""

    def __init__(self, in_channel, out_channel, k=1, s=1, p=0, act='relu'):
        super().__init__()
        conv = nn.Conv2d(in_channel, out_channel,
                         kernel_size=k, stride=s, padding=p, bias=False)
        norm = nn.BatchNorm2d(out_channel)
        self.layer = nn.Sequential(conv, norm)
        self.act = get_activation(act)

    def forward(self, x):
        # Normalize first, then apply the nonlinearity.
        return self.act(self.layer(x))
    

class RepvggBlock(nn.Module):
    """Training-time RepVGG block: the sum of a 3x3 branch and a 1x1 branch."""

    def __init__(self, in_chan=256, out_chan=256):
        super().__init__()
        self.conv1 = ConvNormLayer(in_channel=in_chan, out_channel=out_chan, k=3, s=1, p=1)
        self.conv2 = ConvNormLayer(in_channel=in_chan, out_channel=out_chan, k=1, s=1)

    def forward(self, x):
        wide = self.conv1(x)   # 3x3 branch
        point = self.conv2(x)  # 1x1 branch
        return wide + point


class CSPLayer(nn.Module):
    """Cross-stage-partial fusion block.

    Splits the input through two 1x1 projections; one path additionally runs a
    stack of RepVGG blocks, and the two paths are summed.
    """

    def __init__(self, num_blocks=3, in_channel=512, out_channel=256):
        """
        Args:
            num_blocks: number of RepVGG blocks on the deep path.
            in_channel: input channel count (defaults preserve the original
                hard-coded 512, i.e. a concat of two 256-channel maps).
            out_channel: output channel count (default 256 as before).
        """
        super().__init__()
        self.rep = nn.Sequential(
            *[RepvggBlock(in_chan=out_channel, out_chan=out_channel) for _ in range(num_blocks)])
        self.conv1 = ConvNormLayer(in_channel=in_channel, out_channel=out_channel)
        self.conv2 = ConvNormLayer(in_channel=in_channel, out_channel=out_channel)

    def forward(self, x):
        # Shortcut path (conv1) + deep path (conv2 -> RepVGG stack).
        x_1 = self.conv1(x)
        x_2 = self.rep(self.conv2(x))
        return x_1 + x_2


class MLP(nn.Module):
    """Feed-forward network of `num_layers` Linear layers.

    The activation is applied after every layer except the last; `act=None`
    disables it entirely.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, act='relu'):
        super().__init__()
        self.num_layers = num_layers
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:]))
        self.act = nn.Identity() if act is None else get_activation(act)

    def forward(self, x):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            if idx < last:  # no activation after the output layer
                x = self.act(x)
        return x
    
    
def get_activation(act):
    """Return a freshly constructed activation module for the given name.

    Args:
        act: 'relu', 'silu', 'gelu', or None for an identity mapping.

    Returns:
        An ``nn.Module`` instance.

    Raises:
        ValueError: for unrecognized names. (The original silently returned
        None, deferring the failure to a confusing TypeError on first call.)
    """
    factories = {
        None: nn.Identity,
        'relu': nn.ReLU,
        'silu': nn.SiLU,
        'gelu': nn.GELU,
    }
    try:
        return factories[act]()
    except KeyError:
        raise ValueError(f'Unsupported activation: {act!r}') from None
    

# if __name__ == "__main__":
#     x = torch.randn([2,3,640,640])
#     backbone = Resnet50()
#     model = HybridEncoder()
#     x = backbone(x)
#     x = model(x)

