# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch import nn as nn
from torch.nn import functional as F
from torch.autograd import Function
from .. import builder
from ..builder import FUSION_LAYERS
from mmcv.runner import force_fp32

@FUSION_LAYERS.register_module()
class FeatFusion(BaseModule):
    """Fuse two same-resolution feature maps with a configurable fusion block.

    Typical use is fusing a current feature map ``x[0]`` with a previous
    feature map ``x[1]``, optionally conditioned on the relative ego pose
    and the time difference between the two frames.

    Args:
        in_channels (list[int]): Channels of the two inputs,
            ``[channels(x[0]), channels(x[1])]``. Defaults to [64, 64].
            (The default list is never mutated.)
        out_channels (int): Channels of the fused output. Defaults to 64.
        fusion_type (str): Name of a fusion block class defined in this
            module, e.g. ``'Fusion_block0'`` .. ``'Fusion_block5'``,
            ``'LSTM_CNN'`` or ``'GRU_CNN'``. Defaults to 'Fusion_block0'.
        conv_cfg (dict, optional): Unused; kept for config compatibility.
        norm_cfg (dict, optional): Unused; kept for config compatibility.
        act_cfg (dict, optional): Stored but not used by this layer.
        init_cfg (dict, optional): Initialization config forwarded to
            :class:`BaseModule`.
        activate_out (bool, optional): Stored but not used by this layer.
        fuse_out (bool, optional): Stored but not used by this layer.
        dropout_ratio (int | float, optional): Stored but not used.
        lateral_conv (bool, optional): Unused; kept for config compatibility.
        bev_rnn (dict, optional): Backbone config built via
            ``builder.build_backbone`` and applied to the fused features.
            Defaults to None (disabled).
        pose_nn (dict, optional): Backbone config applied to ``x[1]`` after
            the optional pose concatenation. Defaults to None (disabled).
        Rpose (bool, optional): Whether to concatenate the relative pose
            (first 8 elements of the flattened 4x4 transform) to ``x[1]``.
            Defaults to False.
        encode_t (bool, optional): Whether to encode the frame time
            difference into the fused features. Defaults to False.
        encode_t_num (int, optional): Hidden channels of the time encoder.
            Defaults to 4.
    """

    def __init__(self,
                 in_channels=[64, 64],
                 out_channels=64,
                 fusion_type="Fusion_block0",
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 init_cfg=None,
                 activate_out=True,
                 fuse_out=False,
                 dropout_ratio=0,
                 lateral_conv=True,
                 bev_rnn=None,
                 pose_nn=None,
                 Rpose=False,
                 encode_t=False,
                 encode_t_num=4,
                 **kwargs):
        super(FeatFusion, self).__init__(init_cfg=init_cfg)
        assert isinstance(in_channels, list)
        assert isinstance(out_channels, int)

        self.act_cfg = act_cfg
        self.activate_out = activate_out
        self.fuse_out = fuse_out
        self.dropout_ratio = dropout_ratio
        self.align_mode = kwargs.get('align_mode', 'bilinear')
        self.align_corners = kwargs.get('align_corners', True)
        self.detach = kwargs.get('detach', True)
        self.Rpose = Rpose
        # Falsy placeholder (False) keeps the `if self.bev_rnn:` checks in
        # forward() cheap when the sub-modules are disabled.
        self.bev_rnn = builder.build_backbone(bev_rnn) if bev_rnn else False
        self.pose_nn = builder.build_backbone(pose_nn) if pose_nn else False
        if encode_t:
            self.encode_t = encode_time(c1=1,
                                        c2=encode_t_num,
                                        cout=out_channels)
        else:
            self.encode_t = False
        # Resolve the fusion block class by name from this module instead of
        # eval(): eval() would execute arbitrary strings coming from configs.
        fusion_cls = globals().get(fusion_type)
        if not isinstance(fusion_cls, type):
            raise ValueError(f'Unknown fusion_type: {fusion_type!r}')
        # TODO: expose k/s/pooling_k through the config.
        self.conv = fusion_cls(in_channels[0],
                               in_channels[1],
                               out_channels,
                               k=3, s=1,
                               pooling_k=3)

    @force_fp32()
    def get_Rpose(self, pose):
        """Return the relative transform taking pose[0] into pose[1]'s frame.

        Computed in float64 for a numerically stable inverse, then cast back
        to float32 (inputs are forced to fp32 by the decorator).
        """
        pose_ = pose[1].double().inverse() @ pose[0].double()
        return pose_.float()

    def forward(self, x, Rpose=None, diff_t=None):
        """Fuse the two feature maps in ``x``.

        Args:
            x (list[Tensor]): ``[current, previous]`` feature maps of shape
                (B, C, H, W). NOTE: ``x[1]`` is modified in place when pose
                conditioning or ``pose_nn`` is enabled.
            Rpose (list[Tensor], optional): Pair of (batched) 4x4 pose
                matrices; only used when ``self.Rpose`` is True.
            diff_t (Tensor, optional): Per-sample time difference; only used
                when ``encode_t`` was enabled.

        Returns:
            Tensor: Fused feature map.
        """
        if self.Rpose and Rpose is not None:
            b, _, h, w = x[0].shape
            Rpose = self.get_Rpose(Rpose)
            # Keep only the first 8 of 16 elements (top two rows of the 4x4).
            Rpose = Rpose.view(-1, 16)[:, :8]
            Rpose = Rpose.reshape(b, -1, 1, 1).expand(b, -1, h, w)
            x[1] = torch.cat((x[1], Rpose), 1)
        if self.pose_nn:
            x[1] = self.pose_nn(x[1])[0]
        x = self.conv(x)
        if self.encode_t and diff_t is not None:
            n, c, h, w = x.shape
            diff_t = diff_t.view(-1, 1, 1, 1).expand(n, 1, h, w)
            x = self.encode_t(x, diff_t)
        if self.bev_rnn:
            x = self.bev_rnn(x)[0]
        return x

def autopad(k, p=None):
    """Return 'same' padding for kernel size ``k`` unless ``p`` is given.

    ``k`` may be an int or a list of ints (one per spatial dim).
    """
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [v // 2 for v in k]


class Conv(nn.Module):
    """Standard block: Conv2d (no bias) -> BatchNorm2d -> activation.

    The activation is ReLU when ``act`` is True, identity otherwise.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        # c1/c2: in/out channels; k/s/p/g: kernel, stride, padding, groups.
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # Earlier SiLU variant kept for reference:
        # self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
        self.act = nn.ReLU() if act is True else nn.Identity()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)

class AcModule(nn.Module):
    """Attention-coefficient module: local average pooling then sigmoid.

    Output has the same shape as the input (stride 1, 'same' padding) with
    values in (0, 1), used as a multiplicative gate by the fusion blocks.
    """

    def __init__(self, k=3):
        super().__init__()
        self.ap = nn.AvgPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        return self.sig(self.ap(x))

class ReluModule(nn.Module):
    """Conv block whose output is gated by its own pooled-sigmoid attention."""

    def __init__(self, c1, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.conv = Conv(c1, cout, k, s, p, g)
        self.ac_module = AcModule(k=pooling_k)

    def forward(self, x):
        feat = self.conv(x)
        gate = self.ac_module(feat)
        return feat * gate
    
class Fusion_block0(nn.Module):
    """Baseline fusion: plain channel-wise concatenation of the inputs.

    All constructor arguments are accepted only for interface parity with
    the other fusion blocks; no parameters are learned here.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1,
                 pooling_k=3):
        super().__init__()
        # Attribute name (including typo) preserved for checkpoint keys.
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        fused = torch.cat(x, dim=1)
        return self.relu_moudle(fused)

class Fusion_block1(nn.Module):
    """Fusion by concatenation followed by a gated conv (ReluModule)."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.relu_moudle = ReluModule(c1 + c2, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        joint = torch.cat(x, dim=1)
        return self.relu_moudle(joint)

class Fusion_block2(nn.Module):
    """Project both inputs to ``cout`` channels, gate each branch, and sum."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.relu_moudle1 = ReluModule(cout, cout, k, s, p, g, pooling_k)
        self.relu_moudle2 = ReluModule(cout, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        first = self.relu_moudle1(self.cv1(x[0]))
        second = self.relu_moudle2(self.cv2(x[1]))
        return first + second

class Fusion_block3(nn.Module):
    """Project both inputs to ``cout`` channels, gate only the second, sum."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.relu_moudle = ReluModule(cout, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        main_branch = self.cv1(x[0])
        aux_branch = self.relu_moudle(self.cv2(x[1]))
        return main_branch + aux_branch

class Fusion_block4(nn.Module):
    """Attention fusion: gate the second branch with a map computed from the
    concatenation of both projected branches, then add the first branch."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.cv3 = Conv(2 * cout, cout, k, s, p, g)
        self.ac_module = AcModule(k=pooling_k)

    def forward(self, x):
        branch1 = self.cv1(x[0])
        branch2 = self.cv2(x[1])
        joint = torch.cat([branch1, branch2], dim=1)
        attn = self.ac_module(self.cv3(joint))
        return branch1 + branch2 * attn

class Fusion_block5(nn.Module):
    """Baseline fusion: element-wise sum of the two inputs (no parameters).

    Constructor arguments exist only for interface parity with the other
    fusion blocks.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1,
                 pooling_k=3):
        super().__init__()
        # Attribute name (including typo) preserved for checkpoint keys.
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        fused = x[0] + x[1]
        return self.relu_moudle(fused)

class LSTM_CNN(nn.Module):
    """LSTM-style cell-state update over feature maps.

    ``x[0]`` is the current input x_t; ``x[1]`` doubles as the previous
    cell state c_{t-1}. Returns the new cell state
    c_t = f_t * c_{t-1} + i_t * c~_t (no output gate / hidden state).
    """

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1,
                 pooling_k=3):
        super().__init__()
        # The previous state is reused as-is, so its channel count must
        # match the output channel count.
        assert c2 == cout
        self.cv1 = Conv(c1 + c2, cout, act=False)  # forget gate
        self.cv2 = Conv(c1 + c2, cout, act=False)  # input gate
        self.cv3 = Conv(c1 + c2, cout, act=False)  # candidate state
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        prev_state = x[1]
        joint = torch.cat(x, dim=1)
        forget = torch.sigmoid(self.cv1(joint))
        inp = torch.sigmoid(self.cv2(joint))
        candidate = torch.tanh(self.cv3(joint))
        return prev_state * forget + inp * candidate

class GRU_CNN(nn.Module):
    """GRU-style update over feature maps.

    ``x[0]`` is the current input x_t; ``x[1]`` is the previous hidden
    state h_{t-1}. The updated hidden state is additionally projected by
    ``cv4`` before being returned.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=1, s=1, p=None, g=1,
                 pooling_k=3):
        super().__init__()
        # h_{t-1} is blended with the candidate, so c2 must equal cout.
        assert c2 == cout
        self.cv1 = Conv(c1 + c2, cout, act=False)  # reset gate
        self.cv2 = Conv(c1 + c2, cout, act=False)  # update gate
        self.cv3 = Conv(c1 + c2, cout, act=False)  # candidate hidden state
        self.cv4 = Conv(c2, cout)                  # output projection

    def forward(self, x):
        curr, prev = x[0], x[1]
        joint = torch.cat([curr, prev], dim=1)
        reset = torch.sigmoid(self.cv1(joint))
        update = torch.sigmoid(self.cv2(joint))
        gated = torch.cat([curr, reset * prev], dim=1)
        candidate = torch.tanh(self.cv3(gated))
        hidden = (1 - update) * prev + update * candidate
        return self.cv4(hidden)

class encode_time(nn.Module):
    """Inject a per-pixel time-difference map into a feature map.

    ``t`` (B x 1 x H x W) is embedded to ``c2`` channels, concatenated with
    ``x`` (B x cout x H x W), and projected back down to ``cout`` channels.
    """

    def __init__(self, c1=1, c2=4, cout=None, k=1, s=1, p=None, g=1,
                 pooling_k=3):
        super().__init__()
        # The time input is a single channel by construction.
        assert c1 == 1
        self.cv1 = Conv(c1, c2, act=False)
        self.cv2 = Conv(cout + c2, cout)

    def forward(self, x, t):
        t_embed = self.cv1(t)
        return self.cv2(torch.cat([x, t_embed], dim=1))

class grid_sample(Function):
    """Placeholder grid-sample op for ONNX deployment.

    The eager ``forward`` only returns a zero tensor shaped like ``feat``;
    the actual sampling is delegated to the custom ``HJ_GridSample`` ONNX
    node emitted by ``symbolic`` during export.
    """

    @staticmethod
    def forward(ctx, feat: torch.Tensor, grid: torch.Tensor,
                mode, padding_mode, align_corners):
        # feat: B x C x H x W; grid: B x H x W x 2 (per the deploy contract).
        # new_zeros keeps the input's device and dtype.
        return feat.new_zeros(feat.shape)

    @staticmethod
    def symbolic(g, *inputs):
        feat, grid, mode, padding_mode, align_corners = inputs
        return g.op("HJ_GridSample", feat, grid,
                    mode_i=mode,
                    padding_mode_i=padding_mode,
                    align_corners_i=align_corners)


# Convenience alias mirroring the functional-style call sites.
grid_sample_ = grid_sample.apply

if __name__ == '__main__':
    # Smoke test: run each module on random inputs and print output shapes.
    N, H, W, C = 4, 256, 128, 32
    C2 = 128
    Cout = 16

    input = torch.rand(N, C, H, W)
    print("Input shape: ", input.shape)
    to_fusion = torch.rand(N, C2, H, W)
    print("to_fusion shape: ", to_fusion.shape)
    print("Output channel: ", Cout)


    acmodule = AcModule()
    x = acmodule(input)
    print("After acmodule shape: ", x.shape)

    relumodule = ReluModule(C, C//2, k=1, s=1)
    x = relumodule(input)
    print("After relumodule shape: ", x.shape)

    block1 = Fusion_block1(C, C2, Cout)
    x = block1([input, to_fusion])
    print("After block1 shape: ", x.shape)

    block2 = Fusion_block2(C, C2, Cout)
    x = block2([input, to_fusion])
    print("After block2 shape: ", x.shape)

    block3 = Fusion_block3(C, C2, Cout)
    x = block3([input, to_fusion])
    print("After block3 shape: ", x.shape)

    block4 = Fusion_block4(C, C2, Cout)
    x = block4([input, to_fusion])
    print("After block4 shape: ", x.shape)

    # GRU_CNN requires c2 == cout (the previous hidden state keeps its
    # channel count through the update); the earlier call
    # GRU_CNN(C, C2, Cout) tripped that assertion since C2 != Cout.
    gru_cnn = GRU_CNN(C, C2, C2)
    x = gru_cnn([input, to_fusion])
    print("After gru_cnn shape: ", x.shape)