# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch import nn as nn
from torch.nn import functional as F
from torch.autograd import Function
from mmdet.models import builder
# from ..builder import FUSION_LAYERS
from mmcv.runner import force_fp32
from mmdet.models.builder import NECKS
from mmdet.models import (
    DETECTORS,
    BaseDetector,
    build_backbone,
    build_head,
    build_neck,
)
import numpy as np
class bev_bank(object):
    """Memory bank caching the previous frame's BEV feature, timestamp and
    ego pose so the current frame can be aligned against them.

    All cached tensors are stored as detached clones, so gradients never
    flow back into the previous frame.
    """

    def __init__(self):
        # Cached state of the last processed frame (None until first use).
        self.timestamp = None  # per-sample timestamps
        self.feat = None       # cached BEV feature map
        self.pose = None       # per-sample global ego poses (4x4)
        self.scene = None      # per-sample scene identifiers

    def update(self, timestamp, feat, pose, scene):
        """Overwrite the cache with detached copies of the current frame."""
        self.timestamp = timestamp.clone().detach()
        self.feat = feat.clone().detach()
        self.pose = pose.clone().detach()
        self.scene = scene

    def _get(self, timestamp, feat, pose, scene):
        """Return the cached previous feature plus the relative pose and the
        time delta from the cached frame to the current one.

        Samples whose scene changed (or a cold/mismatched cache) are
        re-seeded with the current frame, yielding an identity relative
        pose and zero dt for those samples.
        """
        is_contigous = self.check_contigous(scene)
        if not isinstance(is_contigous, list):
            # Cold cache or batch-size change: seed every slot from the
            # current frame.  (The original code repeated the feat/timestamp
            # assignments twice; the duplicates are removed here.)
            self.feat = feat.clone().detach()
            self.timestamp = timestamp.clone().detach()
            self.pose = pose.clone().detach()
        else:
            # Re-seed only the samples that jumped to a new scene.
            for i in range(len(is_contigous)):
                if not is_contigous[i]:
                    self.feat[i] = feat[i].clone().detach()
                    self.timestamp[i] = timestamp[i].clone().detach()
                    self.pose[i] = pose[i].clone().detach()
        # Relative transform cached-pose -> current-pose; the inverse is
        # computed in double precision for numerical stability.
        dPose = (self.pose.double().inverse() @ pose.double()).float()
        dt = (timestamp - self.timestamp).float()
        return self.feat, dPose, dt, timestamp, pose, scene

    def get(self, feat, data):
        """Extract timestamp/pose/scene from ``data`` and delegate to _get.

        NOTE(review): poses are moved to CUDA here; assumes a GPU pipeline.
        """
        pose = torch.stack([torch.from_numpy(d['T_global']).cuda() for d in data["img_metas"]])
        timestamp = data['timestamp']
        # NOTE(review): this tests membership in the *list* of meta dicts,
        # which is False for a list of dicts — presumably both keys exist in
        # the metas whenever this branch matters; verify against callers.
        if "scene_token" in data['img_metas']:
            key = "scene_token"
        else:
            key = "scene_id"
        scene_id = [d[key] for d in data['img_metas']]
        return self._get(timestamp, feat, pose, scene_id)

    def check_contigous(self, scene):
        """Per-sample scene continuity flags; scalar False when the cache is
        unusable as a whole (empty or batch size changed)."""
        if (self.scene is None) or (len(self.scene) != len(scene)):
            return False
        return [self.scene[i] == scene[i] for i in range(len(self.scene))]

@NECKS.register_module()
class AlignBevFeat(BaseModule):
    """Warp the previous frame's BEV feature into the current frame using
    the relative ego pose, backed by a one-frame memory bank.

    Args:
        vs (sequence): BEV voxel size, (vx, vy, ...).
        pcr (sequence): point-cloud range
            (x_min, y_min, z_min, x_max, y_max, z_max).
        align_corners (bool): forwarded to ``F.grid_sample``.
        init_cfg (dict, optional): mmcv BaseModule init config.
    """

    def __init__(self,
                 vs,
                 pcr,
                 align_corners=False,
                 init_cfg=None,
                 **kwargs):
        super(AlignBevFeat, self).__init__(init_cfg=init_cfg)
        self.vs = vs
        self.pcr = pcr
        self.MemBank = bev_bank()
        self.align_corners = align_corners
        # BEV grid size in cells.  BUGFIX: ``np.int`` was removed in
        # NumPy 1.24; use the builtin ``int`` instead.
        h = int(np.round((pcr[4] - pcr[1]) / vs[1]))
        w = int(np.round((pcr[3] - pcr[0]) / vs[0]))
        # Homogeneous pixel-coordinate grid (h, w, 3): (x, y, 1).
        xs = torch.linspace(
            0, w - 1, w, dtype=torch.float32).view(1, w).expand(h, w)
        ys = torch.linspace(
            0, h - 1, h, dtype=torch.float32).view(h, 1).expand(h, w)
        grid = torch.stack((xs, ys, torch.ones_like(xs)), -1)
        # NOTE(review): pinned to CUDA at construction time; assumes GPU.
        self.grid = grid.cuda()
        self.h, self.w = h, w

    def gen_grid(self, dpose):
        """Build a normalized ([-1, 1]) sampling grid for ``F.grid_sample``
        that applies the batch of 4x4 relative poses ``dpose`` in BEV space.
        """
        n = dpose.shape[0]
        h, w = self.h, self.w
        grid = self.grid.view(1, h, w, 3).expand(n, h, w, 3).view(n, h, w, 3, 1)

        pose = dpose.float().view(n, 1, 1, 4, 4)
        # Drop the z row/column: BEV alignment is a planar (x, y) transform.
        pose = pose[:, :, :,
                    [True, True, False, True], :][:, :, :, :,
                                                  [True, True, False, True]]
        # Affine map from feature-pixel coordinates to metric BEV coords.
        feat2bev = torch.zeros((3, 3), dtype=grid.dtype).to(grid)
        feat2bev[0, 0] = self.vs[0]
        feat2bev[1, 1] = self.vs[1]
        feat2bev[0, 2] = self.pcr[0]
        feat2bev[1, 2] = self.pcr[1]
        feat2bev[2, 2] = 1
        feat2bev = feat2bev.view(1, 3, 3)
        # pixel -> bev -> (relative pose) -> bev -> pixel
        tf = torch.inverse(feat2bev).matmul(pose).matmul(feat2bev)

        # Transform, then normalize to grid_sample's [-1, 1] convention.
        grid = tf.matmul(grid)
        normalize_factor = torch.tensor([w - 1.0, h - 1.0],
                                        dtype=pose.dtype,
                                        device=pose.device)
        grid = grid[:, :, :, :2, 0] / normalize_factor.view(1, 1, 1,
                                                            2) * 2.0 - 1.0
        return grid

    @force_fp32()
    def align_feat(self, feat, dpose):
        """Warp ``feat`` by the relative pose ``dpose`` (fp32 enforced)."""
        grid = self.gen_grid(dpose)
        output = F.grid_sample(feat, grid, align_corners=self.align_corners)
        return output

    def forward(self, bev_feat, **data):
        """Return the previous BEV feature aligned to the current frame and
        the per-sample time delta, then cache the current frame."""
        pre_bev_feat, dPose, dt, timestamp, pose, scene = self.MemBank.get(bev_feat, data)
        pre_bev_feat = self.align_feat(pre_bev_feat, dPose)
        self.MemBank.update(timestamp, bev_feat, pose, scene)
        return pre_bev_feat, dt

@NECKS.register_module()
class RnnFusion(BaseModule):
    """Temporal BEV fusion: align the previous frame's feature, then fuse
    it with the current frame's feature."""

    def __init__(self,
                 alignfeat=dict(),
                 featfusion=dict(),
                 init_cfg=None,
                 **kwargs):
        super(RnnFusion, self).__init__(init_cfg=init_cfg)
        self.alignfeat = build_neck(alignfeat)
        self.featfusion = build_neck(featfusion)

    def forward(self, feat, metas: dict):
        """Align the cached previous feature to ``feat`` and fuse them."""
        aligned_prev, time_delta = self.alignfeat(feat, **metas)
        return self.featfusion([aligned_prev, feat], diff_t=time_delta)

    def forward_deploy(self, feat, pre_feat, dpose, dt):
        """Deployment path: the previous feature and relative pose are
        supplied explicitly instead of read from the memory bank."""
        warped_prev = self.alignfeat.align_feat(pre_feat, dpose)
        return self.featfusion([warped_prev, feat], diff_t=dt)
    
@NECKS.register_module()
class FeatFusion(BaseModule):
    """Fuse the (aligned) previous BEV feature with the current one.

    Args:
        in_channels (list[int]): channels of [previous, current] features.
        out_channels (int): fused output channels.
        fusion_type (str): name of a fusion block class defined in this
            module (``Fusion_block0`` .. ``Fusion_block5``, ``LSTM_CNN``,
            ``GRU_CNN``).
        align_feat (dict | None): config for the alignment neck, or None to
            skip alignment entirely.
        align (bool): if False, skip alignment even when ``align_feat`` is
            configured.
        bev_rnn / pose_nn (dict | None): optional extra backbones applied to
            the fused feature / the current feature.
        Rpose (bool): append the flattened relative pose to the current
            feature before fusion.
        encode_t (bool): encode the frame time delta into the fused feature.
        encode_t_num (int): embedding channels for the time delta.
    """

    def __init__(self,
                 in_channels=[64, 64],
                 out_channels=64,
                 fusion_type="Fusion_block0",
                 align_feat=None,
                 align=True,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 init_cfg=None,
                 activate_out=True,
                 fuse_out=False,
                 dropout_ratio=0,
                 bev_rnn=None,
                 pose_nn=None,
                 Rpose=False,
                 encode_t=False,
                 encode_t_num=4,
                 **kwargs):
        super(FeatFusion, self).__init__(init_cfg=init_cfg)
        assert isinstance(in_channels, list)
        assert isinstance(out_channels, int)

        self.act_cfg = act_cfg
        self.activate_out = activate_out
        self.fuse_out = fuse_out
        self.dropout_ratio = dropout_ratio
        self.align_mode = kwargs.get('align_mode', 'bilinear')
        self.align_corners = kwargs.get('align_corners', True)
        self.detach = kwargs.get('detach', True)
        self.Rpose = Rpose
        self.align = align
        # BUGFIX: the default used to be the ``dict`` *type*, which is not
        # None and crashed ``build_neck``; default is now None (no aligner).
        if align_feat is not None:
            self.alignfeat = build_neck(align_feat)
        else:
            self.alignfeat = None
        self.bev_rnn = build_backbone(bev_rnn) if bev_rnn else False
        self.pose_nn = build_backbone(pose_nn) if pose_nn else False
        if encode_t:
            self.encode_t = encode_time(c1=1,
                                        c2=encode_t_num,
                                        cout=out_channels)
        else:
            self.encode_t = False
        # NOTE(review): ``eval`` on a config string is unsafe for untrusted
        # configs; ``fusion_type`` must name a class in this module.
        self.conv = eval(fusion_type)(in_channels[0],
                                      in_channels[1],
                                      out_channels,
                                      k=3, s=1,
                                      pooling_k=3)

    @force_fp32()
    def get_Rpose(self, pose):
        """Relative transform from ``pose[0]`` to ``pose[1]`` (fp32)."""
        pose_ = pose[1].double().inverse() @ pose[0].double()
        return pose_.float()

    def rnn(self, x, Rpose=None, diff_t=None):
        """Fuse ``x = [prev_feat, curr_feat]`` through the configured blocks."""
        if self.Rpose and Rpose is not None:
            b, _, h, w = x[0].shape
            Rpose = self.get_Rpose(Rpose)
            # Keep only the first 8 of the 16 flattened pose entries and
            # broadcast them over the spatial grid as extra channels.
            Rpose = Rpose.view(-1, 16)[:, :8]
            Rpose = Rpose.reshape(b, -1, 1, 1).expand(b, -1, h, w)
            x[1] = torch.cat((x[1], Rpose), 1)
        if self.pose_nn:
            x[1] = self.pose_nn(x[1])[0]
        x = self.conv(x)
        if self.encode_t and diff_t is not None:
            n, c, h, w = x.shape
            diff_t = diff_t.view(-1, 1, 1, 1).expand(n, 1, h, w)
            x = self.encode_t(x, diff_t)
        if self.bev_rnn:
            x = self.bev_rnn(x)[0]
        return x

    def forward(self, feat, metas: dict):
        """Fuse ``feat`` with its (optionally aligned) previous feature."""
        if isinstance(feat, (list, tuple)):
            feat = feat[0]
        if self.alignfeat is not None and self.align:
            pre_feat, dt = self.alignfeat(feat, **metas)
        else:
            # BUGFIX: ``dt`` was previously left unbound on this path,
            # raising NameError in the ``rnn`` call below.
            pre_feat, dt = feat, None
        return self.rnn([pre_feat, feat], diff_t=dt)

    def forward_deploy(self, feat, pre_feat, dpose, dt):
        """Deployment path with the previous feature and pose given."""
        pre_feat = self.alignfeat.align_feat(pre_feat, dpose)
        return self.rnn([pre_feat, feat], diff_t=dt)

def autopad(k, p=None):
    """Return 'same'-style padding for kernel size ``k`` when ``p`` is not
    given; otherwise return ``p`` unchanged.

    ``k`` may be an int or a per-dimension sequence of ints.
    """
    if p is not None:
        return p
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]


class Conv(nn.Module):
    """Standard block: Conv2d (no bias) -> BatchNorm2d -> activation.

    The activation is ReLU when ``act`` is True, identity otherwise.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.ReLU() if act is True else nn.Identity()

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)

class AcModule(nn.Module):
    """Spatial gate: average-pool (size-preserving) then sigmoid, yielding
    per-location attention weights in (0, 1)."""

    def __init__(self, k=3):
        super().__init__()
        self.ap = nn.AvgPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        pooled = self.ap(x)
        return self.sig(pooled)

class ReluModule(nn.Module):
    """Conv block whose output is re-weighted by its own AcModule
    attention map (self-gated convolution)."""

    def __init__(self, c1, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.conv = Conv(c1, cout, k, s, p, g)
        self.ac_module = AcModule(k=pooling_k)

    def forward(self, x):
        feat = self.conv(x)
        gate = self.ac_module(feat)
        return feat.mul(gate)
    
class Fusion_block0(nn.Module):
    """Baseline fusion: plain channel concatenation (identity afterwards).

    Extra constructor arguments are accepted only for signature parity with
    the other fusion blocks.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        fused = torch.cat(x, dim=1)
        return self.relu_moudle(fused)

class Fusion_block1(nn.Module):
    """Concatenate both inputs along channels, then refine them with a
    single self-gated conv block."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.relu_moudle = ReluModule(c1 + c2, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        fused = torch.cat(x, dim=1)
        return self.relu_moudle(fused)

class Fusion_block2(nn.Module):
    """Project each input to ``cout`` channels, gate each independently
    with its own self-gated conv block, then sum them."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.relu_moudle1 = ReluModule(cout, cout, k, s, p, g, pooling_k)
        self.relu_moudle2 = ReluModule(cout, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        first = self.relu_moudle1(self.cv1(x[0]))
        second = self.relu_moudle2(self.cv2(x[1]))
        return first.add(second)

class Fusion_block3(nn.Module):
    """Project both inputs to ``cout`` channels; gate only the second one,
    then sum."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.relu_moudle = ReluModule(cout, cout, k, s, p, g, pooling_k)

    def forward(self, x):
        base = self.cv1(x[0])
        gated = self.relu_moudle(self.cv2(x[1]))
        return base.add(gated)

class Fusion_block4(nn.Module):
    """Cross-attention fusion: the weight map applied to the second input
    is computed from the concatenation of both projected inputs."""

    def __init__(self, c1, c2, cout, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.cv1 = Conv(c1, cout)
        self.cv2 = Conv(c2, cout)
        self.cv3 = Conv(2 * cout, cout, k, s, p, g)
        self.ac_module = AcModule(k=pooling_k)

    def forward(self, x):
        first = self.cv1(x[0])
        second = self.cv2(x[1])
        joint = torch.cat([first, second], dim=1)
        attn = self.ac_module(self.cv3(joint))
        return first.add(second.mul(attn))

class Fusion_block5(nn.Module):
    """Baseline fusion: element-wise sum of the two inputs (identity
    afterwards).  Extra constructor arguments exist only for signature
    parity with the other fusion blocks."""

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        summed = x[0] + x[1]
        return self.relu_moudle(summed)

class LSTM_CNN(nn.Module):
    """Single convolutional LSTM-style update without an output gate.

    ``x[0]`` is the current input and ``x[1]`` the previous state; the
    returned tensor is the updated cell state
    ``ct = ft * c_{t-1} + it * candidate``.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=3, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        assert c2 == cout
        self.cv1 = Conv(c1 + c2, cout, act=False)
        self.cv2 = Conv(c1 + c2, cout, act=False)
        self.cv3 = Conv(c1 + c2, cout, act=False)
        self.relu_moudle = nn.Identity()

    def forward(self, x):
        prev_state = x[1]
        joint = torch.cat(x, 1)
        forget_gate = torch.sigmoid(self.cv1(joint))
        input_gate = torch.sigmoid(self.cv2(joint))
        candidate = torch.tanh(self.cv3(joint))
        return prev_state * forget_gate + input_gate * candidate

class GRU_CNN(nn.Module):
    """Single convolutional GRU update followed by an output conv.

    ``x[0]`` is the current input and ``x[1]`` the previous hidden state.
    """

    def __init__(self, c1=None, c2=None, cout=None, k=1, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        assert c2 == cout
        self.cv1 = Conv(c1 + c2, cout, act=False)
        self.cv2 = Conv(c1 + c2, cout, act=False)
        self.cv3 = Conv(c1 + c2, cout, act=False)
        self.cv4 = Conv(c2, cout)

    def forward(self, x):
        current, hidden = x[0], x[1]
        joint = torch.cat(x, 1)
        reset = torch.sigmoid(self.cv1(joint))
        update = torch.sigmoid(self.cv2(joint))
        candidate_in = torch.cat([current, reset * hidden], 1)
        candidate = torch.tanh(self.cv3(candidate_in))
        new_hidden = (1 - update) * hidden + update * candidate
        return self.cv4(new_hidden)

class encode_time(nn.Module):
    """Embed a 1-channel time-delta map and fuse it into a feature map of
    ``cout`` channels."""

    def __init__(self, c1=1, c2=4, cout=None, k=1, s=1, p=None, g=1, pooling_k=3):
        super().__init__()
        assert c1 == 1
        self.cv1 = Conv(c1, c2, act=False)
        self.cv2 = Conv(cout + c2, cout)

    def forward(self, x, t):
        time_feat = self.cv1(t)
        return self.cv2(torch.cat([x, time_feat], 1))

class grid_sample(Function):
    """ONNX-export stand-in for ``F.grid_sample``.

    ``forward`` only produces a zero placeholder with the input's shape;
    ``symbolic`` emits a custom ``HJ_GridSample`` op so the actual sampling
    is performed by the deployment runtime.
    """

    @staticmethod
    def forward(ctx, feat: torch.Tensor, grid: torch.Tensor,
                mode, padding_mode, align_corners):
        """Placeholder forward for ONNX export.

        feat: B x c x h x w
        grid: B x h x w x 2
        """
        return torch.zeros(size=feat.shape, device=feat.device, dtype=feat.dtype)

    @staticmethod
    def symbolic(g, *inputs):
        return g.op("HJ_GridSample", inputs[0], inputs[1],
                    mode_i=inputs[2], padding_mode_i=inputs[3],
                    align_corners_i=inputs[4])

grid_sample_ = grid_sample.apply

if __name__ == '__main__':
    # Smoke-test each building block and report the resulting shapes.
    N, H, W, C = 4, 256, 128, 32
    C2 = 128
    Cout = 16

    input = torch.rand(N, C, H, W)
    print("Input shape: ", input.shape)
    to_fusion = torch.rand(N, C2, H, W)
    print("to_fusion shape: ", to_fusion.shape)
    print("Output channel: ", Cout)

    acmodule = AcModule()
    x = acmodule(input)
    print("After acmodule shape: ", x.shape)

    relumodule = ReluModule(C, C // 2, k=1, s=1)
    x = relumodule(input)
    print("After relumodule shape: ", x.shape)

    block1 = Fusion_block1(C, C2, Cout)
    x = block1([input, to_fusion])
    print("After block1 shape: ", x.shape)

    block2 = Fusion_block2(C, C2, Cout)
    x = block2([input, to_fusion])
    print("After block2 shape: ", x.shape)

    block3 = Fusion_block3(C, C2, Cout)
    x = block3([input, to_fusion])
    print("After block3 shape: ", x.shape)

    block4 = Fusion_block4(C, C2, Cout)
    x = block4([input, to_fusion])
    print("After block4 shape: ", x.shape)

    # BUGFIX: GRU_CNN asserts c2 == cout and concatenates c1+c2 channels
    # internally, so GRU_CNN(C, C2, Cout) with Cout != C2 raised an
    # AssertionError; construct it with cout == C2 to match to_fusion.
    gru_cnn = GRU_CNN(C, C2, C2)
    x = gru_cnn([input, to_fusion])
    print("After gru_cnn shape: ", x.shape)