# Copyright (c) Phigent Robotics. All rights reserved.
import torch
import torch.nn.functional as F
from mmcv.runner import force_fp32

from mmdet3d.ops.bev_pool_v2.bev_pool import TRTBEVPoolv2
from mmdet.models import DETECTORS
from .. import builder
from .bevdet_fusion import BEVDet_Fusion
from torch.cuda.amp import autocast
from typing import Tuple, List, Dict
import torch.nn as nn
import numpy as np
import random
@DETECTORS.register_module()
class BEVDet4d_Fusion(BEVDet_Fusion):
    r"""BEVDet paradigm for multi-camera 3D object detection.

    Please refer to the `paper <https://arxiv.org/abs/2112.11790>`_

    Args:
        img_view_transformer (dict): Configuration dict of view transformer.
        img_bev_encoder_backbone (dict): Configuration dict of the BEV encoder
            backbone.
        img_bev_encoder_neck (dict): Configuration dict of the BEV encoder neck.
    """

    def __init__(self, 
        feat4d_fusion_layer=None,
        pre_process_fusion=None,
        with_prev=True,
        Nframe=False,
        sequ_num=1,
        numC_head=256,
        bev_align=True,
        sequ_mode=True,
        **kwargs):
        """Build the 4D (temporal) fusion variant of ``BEVDet_Fusion``.

        Args:
            feat4d_fusion_layer (dict, optional): Config of the temporal
                fusion layer. Extra keys consumed here: ``align_method``,
                ``loss_sequ``, ``align_corners``, ``aug_p``,
                ``aug_sequ_noise``, ``aug_lidar``, ``aug_bevfeat``.
            pre_process_fusion (dict, optional): Config of an optional
                pre-processing backbone applied before fusion.
            with_prev (bool): Whether to fuse features of previous frames.
            Nframe (bool): If True, fuse N frames by concatenation;
                otherwise use the 2-frame (recurrent) mode.
            sequ_num (int): Number of frames in a training sequence.
            numC_head (int): Channel count of the fused BEV feature (used
                to build zero placeholders in ``fusion_sequ_block_2``).
            bev_align (bool): Whether to warp previous BEV features into
                the current frame before fusion.
            sequ_mode (bool): Input-unpacking mode used in ``forward_test``.
        """
        super(BEVDet4d_Fusion, self).__init__(
            **kwargs)
        if pre_process_fusion:
            self.pre_process_fusion = builder.build_backbone(pre_process_fusion)
        else:
            self.pre_process_fusion = None
        # bugfix: these .get() calls used to run before the None-check
        # below, raising AttributeError when feat4d_fusion_layer is None.
        fusion_cfg = feat4d_fusion_layer if feat4d_fusion_layer else {}
        self.align_method = fusion_cfg.get("align_method", 0)
        self.loss_sequ = fusion_cfg.get("loss_sequ", None)
        self.align_corners = fusion_cfg.get("align_corners", True)
        # augmentation parameters (only used at training time)
        self.aug_p = fusion_cfg.get("aug_p", 0.5)
        self.aug_sequ_noise = fusion_cfg.get("aug_sequ_noise", [0.0, 0.1])

        self.aug_lidar = fusion_cfg.get("aug_lidar", [0.0, 0.0])
        self.aug_bevfeat = fusion_cfg.get("aug_bevfeat", 0.0)

        if feat4d_fusion_layer:
            self.feat4d_fusion_layer = builder.build_fusion_layer(feat4d_fusion_layer)
        else:
            self.feat4d_fusion_layer = None

        # prefer train_cfg; fall back to test_cfg at inference time
        if self.train_cfg is not None:
            cfg = self.train_cfg
        else:
            cfg = self.test_cfg
        self.cfg = cfg
        self.Nframe = Nframe
        self.sequ_num = sequ_num
        self.with_prev = with_prev
        self.numC_head = numC_head
        self.bev_align = bev_align

        if self.feat4d_fusion_layer is not None:
            # the fusion layer decides whether history features are detached
            self.detach = getattr(self.feat4d_fusion_layer, "detach")
        else:
            self.detach = True

        # per-frame history buffers consumed by extract_feat_sequ (inference)
        s = sequ_num if self.Nframe else 1
        self.pre_timestamp = [None] * s
        self.pre_feat = [None] * s
        self.pre_pose = [None] * s
        self.pre_token = [None] * s
        self.history_squ = s
        self.grid = None  # lazily-built sampling-grid cache
        self.sequ_mode = sequ_mode
    def init_align_center(self):
        """Build the homogeneous BEV cell-center grid used for alignment.

        Returns:
            Tensor: an (H, W, 4) tensor whose last dim is (y, x, 0, 1) —
            the metric center of every BEV cell in homogeneous coordinates.
        """
        out_factor = self.cfg['pts']['out_size_factor']
        pc_range = self.cfg['pts']['point_cloud_range']
        voxel_size = self.cfg['pts']['voxel_size']
        step_x = voxel_size[0] * out_factor
        step_y = voxel_size[1] * out_factor

        # cell centers: lower bound + half a cell, stepping one cell at a time
        centers_x = torch.arange(
            pc_range[0] + step_x / 2, pc_range[3], step=step_x,
            dtype=torch.float32).cuda()
        centers_y = torch.arange(
            pc_range[1] + step_y / 2, pc_range[4], step=step_y,
            dtype=torch.float32).cuda()
        zeros = centers_x.new_tensor(0)
        ones = centers_x.new_tensor(1)
        grid_x, grid_y, grid_z, grid_i = torch.meshgrid([
            centers_x, centers_y, zeros, ones,
        ])  # [x_grid, y_grid, z_grid]
        # (y, x, z, 1) channel order -> matches the h/w layout of feature maps
        anchors = torch.cat((grid_y, grid_x, grid_z, grid_i), dim=-1)
        return anchors.squeeze()

    def gen_grid(self, pose0, pose1, shape=(128,128)):
        """Build a normalized sampling grid warping a BEV feature between
        the ``pose0`` and ``pose1`` frames (``align_method != 0`` path).

        Args:
            pose0, pose1: (n, 4, 4) pose matrices — assumed ego-to-global;
                TODO confirm convention against callers.
            shape (tuple): (h, w) of the BEV feature to warp.

        Returns:
            Tensor: (n, h, w, 2) grid in [-1, 1] for ``F.grid_sample``.
        """
        n = pose0.shape[0]
        h,w = shape
        if self.grid is None:
            # generate grid
            # NOTE(review): the cache is not keyed on ``shape``, and
            # align_pose_matrix stores a *different* tensor under self.grid;
            # reuse with another shape or mixed align paths would be wrong —
            # confirm the two paths are never mixed in one run.
            xs = torch.linspace(
                0, w - 1, w, dtype=pose0.dtype,
                device=pose0.device).view(1, w).expand(h, w)
            ys = torch.linspace(
                0, h - 1, h, dtype=pose0.dtype,
                device=pose0.device).view(h, 1).expand(h, w)
            grid = torch.stack((xs, ys, torch.ones_like(xs)), -1)
            self.grid = grid
        else:
            grid = self.grid
        grid = grid.view(1, h, w, 3).expand(n, h, w, 3).view(n, h, w, 3, 1)

        # relative pose between the two frames
        pose = pose0.double().inverse() @ pose1.double()
        pose = pose.float().view(n,1,1,4,4)
        # drop the z row/column: the BEV warp is planar
        pose = pose[:, :, :,
                      [True, True, False, True], :][:, :, :, :,
                                                    [True, True, False, True]]
        # l02l1 = c12l0.inverse() @ c02l0
        # l02l1 = l02l1.float().view(n,1,1,4,4)
        # feature-index -> metric BEV coordinates (scale + lower bound)
        feat2bev = torch.zeros((3, 3), dtype=grid.dtype).to(grid)
        feat2bev[0, 0] = self.img_view_transformer.grid_interval[0]
        feat2bev[1, 1] = self.img_view_transformer.grid_interval[1]
        feat2bev[0, 2] = self.img_view_transformer.grid_lower_bound[0]
        feat2bev[1, 2] = self.img_view_transformer.grid_lower_bound[1]
        feat2bev[2, 2] = 1
        feat2bev = feat2bev.view(1, 3, 3)
        # full chain: feature index -> BEV metric -> other frame -> feature index
        tf = torch.inverse(feat2bev).matmul(pose).matmul(feat2bev)

        # transform and normalize to grid_sample's [-1, 1] range
        grid = tf.matmul(grid)
        normalize_factor = torch.tensor([w - 1.0, h - 1.0],
                                        dtype=pose0.dtype,
                                        device=pose0.device)
        grid = grid[:, :, :, :2, 0] / normalize_factor.view(1, 1, 1,
                                                            2) * 2.0 - 1.0
        return grid

    def align_pose_matrix(self, pose):
        """Build a normalized sampling grid from metric BEV cell centers
        (``align_method == 0`` path).

        Args:
            pose: indexable pair ``(pose0, pose1)`` of (n, 4, 4) pose
                matrices; the grid maps pose0-frame features toward the
                pose1 frame — TODO confirm direction against callers.

        Returns:
            Tensor: (n, h, w, 2) grid in [-1, 1] for ``F.grid_sample``.
        """
        if self.grid is None:
            # lazily build the (h, w, 4) homogeneous cell-center anchors
            # NOTE(review): self.grid is shared with gen_grid, which caches
            # a different tensor under the same attribute — the two
            # alignment paths must not be mixed within one run.
            self.grid = self.init_align_center()
        # pose0 2 pose1
        pose_ = pose[0].double().inverse() @ pose[1].double()
        pose_ = pose_.float()
        sd = self.cfg['pts']['out_size_factor']
        voxels_size = self.cfg['pts']['voxel_size']

        h, w, _ = self.grid.shape
        # keep only the x/y rows of the 4x4 transform (planar warp)
        pose_ = pose_.unsqueeze(1).unsqueeze(1)[..., :2, :4]
        grid = self.grid.unsqueeze(0).unsqueeze(-1)
        grid = ( pose_ @ grid)
        grid = grid.reshape(-1, h, w, 2)

        # normalize metric offsets to grid_sample's [-1, 1] range
        grid[..., 0] /= ((w - 1) * voxels_size[0] * sd / 2)
        grid[..., 1] /= ((h - 1) * voxels_size[1] * sd / 2)
        return grid

    def feat_warp(self, feats=None,
                    mode='bilinear',
                    align_corners=True, **kwargs):
        """Warp a feature map with a pre-computed sampling grid.

        Args:
            feats (tuple): ``(grid, feature)`` — sampling grid in
                normalized [-1, 1] coordinates, and the feature to warp.
            mode (str): Interpolation mode for ``grid_sample``.
            align_corners (bool): Passed through to ``grid_sample``.

        Returns:
            list: One-element list holding the warped feature.
        """
        grid, feature = feats[0], feats[1]
        warped = F.grid_sample(feature,
                               grid,
                               mode=mode,
                               padding_mode='zeros',
                               align_corners=align_corners)
        return [warped]

    @force_fp32()
    def align_feat(self, feat, pose0, pose1):
        """Warp ``feat`` between the pose0 and pose1 frames.

        Dispatches grid construction on ``self.align_method``: 0 uses the
        metric cell-center path, anything else the index-space path.
        """
        if self.align_method == 0:
            sample_grid = self.align_pose_matrix((pose0, pose1))
        else:
            sample_grid = self.gen_grid(pose0, pose1, feat.shape[2:])
        warped = F.grid_sample(feat, sample_grid,
                               align_corners=self.align_corners)
        return [warped]

    def fusion_block_default(self, feat, pre_feats=None, **kwargs):
        """Warp the previous BEV feature and fuse it with the current one.

        Args:
            feat: Current-frame BEV feature.
            pre_feats (tuple): ``(grid, prev_feature)`` — sampling grid and
                the previous-frame feature to warp.

        Returns:
            list: One-element list holding the fused feature.
        """
        grid, prev = pre_feats[0], pre_feats[1]
        warped_prev = F.grid_sample(
            prev,
            grid,
            mode=getattr(self.feat4d_fusion_layer, "align_mode"),
            padding_mode='zeros',
            align_corners=getattr(self.feat4d_fusion_layer, "align_corners"))
        fused = self.feat4d_fusion_layer(feat, warped_prev)
        return [fused]
    
    def fusion_block_deploy(self, feat, pre_feats=None, **kwargs):
        """Deploy-time variant of ``fusion_block_default``.

        Uses the exportable ``grid_sample_`` op, which takes integer enums
        instead of string mode names.
        """
        from ..fusion_layers.feat_fusion import grid_sample_

        mode = getattr(self.feat4d_fusion_layer, "align_mode")
        padding_mode = "zeros"
        # string -> enum translation; unknown strings fall back to the last
        # enum value (bicubic / reflection), matching the original chains
        mode_enum = {"bilinear": 0, "nearest": 1}.get(mode, 2)
        padding_mode_enum = {"zeros": 0, "border": 1}.get(padding_mode, 2)

        warped_prev = grid_sample_(pre_feats[1],
                                   pre_feats[0],
                                   mode_enum,
                                   padding_mode_enum,
                                   getattr(self.feat4d_fusion_layer, "align_corners"))
        fused = self.feat4d_fusion_layer(feat, warped_prev)
        return [fused]
    
    @staticmethod
    def check_contiuguous(timestamp, frame, eps=1.0):
        """Return whether ``frame`` follows ``frame - 1`` within ``eps``.

        Frame 0 has no predecessor and is never contiguous.
        """
        if frame == 0:
            return False
        gap = (timestamp[frame] - timestamp[frame - 1]).abs()
        return gap < eps
    
    @staticmethod
    def check_contiuguous_pre(timestamp, frame, eps=1.0):
        """Same test as ``check_contiuguous`` (kept as a separate hook).

        NOTE(review): body is identical to ``check_contiuguous``; consider
        delegating to it.
        """
        if frame == 0:
            return False
        delta = timestamp[frame] - timestamp[frame - 1]
        return delta.abs() < eps

    def get_key_frames(self, timestamp):
        """Split a frame sequence into key frames.

        A new key frame starts whenever the time gap to the previous frame
        exceeds the contiguity threshold, temporal fusion is disabled, or a
        run of ``sequ_num`` contiguous frames is complete. The final frame
        is always a key frame.

        Args:
            timestamp: Per-frame timestamps, int-indexable and supporting
                elementwise subtraction (e.g. a 1-D tensor).

        Returns:
            list[int]: Key-frame indices in increasing order.
        """
        key_frames = []
        run_len = 0  # explicit init: original relied on short-circuit to avoid NameError
        for frame in range(len(timestamp)):
            if self.check_contiuguous(timestamp, frame) \
                    and self.with_prev and run_len < self.sequ_num - 1:
                run_len += 1
            else:
                run_len = 0
                key_frames.append(frame)
        # bugfix: an empty timestamp used to raise IndexError on key_frames[-1]
        if key_frames and key_frames[-1] < len(timestamp) - 1:
            key_frames.append(len(timestamp) - 1)
        return key_frames

    def fusion_sequ_block_N(self, bev_feats, 
                            pose=None, 
                            timestamp=None,
                            **kwargs):
        """N-frame training fusion: warp each non-key frame into its key
        frame, fuse every contiguous run with the fusion layer, then encode
        all key-frame features in one batch.

        Args:
            bev_feats (Tensor): Per-frame BEV features (frame-major).
            pose (Tensor): Per-frame pose matrices.
            timestamp: Per-frame timestamps used to find sequence breaks.

        Returns:
            tuple: ``([encoded_feats], save_gt)`` — ``save_gt`` lists the
            key-frame indices that receive supervision.
        """
        fusion_feat = bev_feats
        key_frames = self.get_key_frames(timestamp)

        frame_list = []
        tmp1 = []  # features accumulated for the current run
        s = 0      # number of warped (non-key) frames in the run
        save_gt = []
        key_frame_ = 0
        key_frame = key_frames[key_frame_]
        for frame in range(len(bev_feats)):  
            feat = fusion_feat[frame: frame + 1]
            tmp1.append(feat)
            if frame != key_frame:
                # warp this frame into its key frame's coordinates
                align_matrix = self.align_pose_matrix(pose[[frame, key_frame]])
                if self.detach:
                    # cut gradients through history frames
                    tmp1[-1] = tmp1[-1].detach()
                tmp1[-1] = self.feat_warp((align_matrix, tmp1[-1]))[0]
                s += 1
            else:
                save_gt.append(frame)
                # left-pad short runs with zeros so the fusion layer always
                # sees sequ_num inputs
                tmp2 = [torch.zeros_like(feat) for i in range(self.sequ_num-1-s)]
                tmp2.extend(tmp1)
                tmp2 = self.feat4d_fusion_layer(tmp2)
                frame_list.append(tmp2)
                tmp1 = []
                s = 0
                if key_frame < len(bev_feats)-1:
                    key_frame_ += 1
                    key_frame = key_frames[key_frame_]

        frame_all = self.bev_encoder(torch.cat(frame_list, 0).contiguous())
        return [frame_all], save_gt

    def fusion_sequ_block_2(self, bev_feats: torch.Tensor, 
                        pose=None, 
                        timestamp=None,
                        **kwargs):
        """2-frame (recurrent) training fusion: each frame is fused with the
        previous frame's *encoded* output, warped into the current frame.

        Args:
            bev_feats (Tensor): Per-frame BEV features (frame-major).
            pose (Tensor): Per-frame pose matrices.
            timestamp: Per-frame timestamps used to detect sequence breaks.

        Returns:
            tuple: ``([encoded_feats], save_gt)`` — every frame is
            supervised in this mode.
        """
        fusion_feat = bev_feats
        frame_list = []
        save_gt = []
        for frame in range(len(bev_feats)):
            feat_curr = fusion_feat[frame: frame + 1]
            tmp_curr = [feat_curr]
            if self.check_contiuguous_pre(timestamp, frame) and self.with_prev:
                # warp the previous encoded feature into the current frame
                align_matrix = self.align_pose_matrix(pose[frame-1: frame+1])
                pre_feat = frame_list[-1] 
                if self.detach:
                    # cut gradients through the history branch
                    pre_feat = pre_feat.detach()
                ffeat = self.feat_warp((align_matrix, pre_feat))[0]
                tmp_curr.append(ffeat)
            else:
                # sequence break: use a zero history feature instead
                n,_,h,w = feat_curr.shape
                tmp_curr.append(bev_feats.new_zeros(size=(n,self.numC_head,h,w)))
            tmp_curr = self.feat4d_fusion_layer(tmp_curr)
            save_gt.append(frame)
            feat_bev_curr = self.bev_encoder(tmp_curr)
            frame_list.append(feat_bev_curr)
            
        frame_all = torch.cat(frame_list, 0).contiguous()
        return [frame_all], save_gt

    def fusion_sequ_block(self, bev_feats, **kwargs):
        """Dispatch temporal fusion to the N-frame or 2-frame strategy.

        NOTE(review): this method is immediately re-defined below, so this
        definition is dead code — only the later one takes effect. The two
        should be collapsed into a single definition; this body mirrors the
        effective (later) one so they at least stay consistent.
        """
        if self.training:
            if self.Nframe:
                # training, N-frame concatenation mode
                return self.fusion_sequ_block_N(bev_feats[0], **kwargs)
            else:
                # training, 2-frame recurrent mode
                return self.fusion_sequ_block_2(bev_feats[0], **kwargs)
        else:
            return self.fusion_sequ_block_test(bev_feats[0], **kwargs)

    def fusion_sequ_block(self, bev_feats, **kwargs):
        """Dispatch temporal fusion by mode (training vs. eval).

        NOTE(review): this re-definition shadows the identically named
        method above; only this one is active — the duplicate should be
        removed. ``fusion_sequ_block_test`` is not defined in this file;
        presumably provided elsewhere — verify.
        """
        if self.training:
            if self.Nframe:
                # for training N frame cat mode
                return self.fusion_sequ_block_N(bev_feats[0], **kwargs)
            else:  
                # for training  2 frame  cat mode
                return self.fusion_sequ_block_2(bev_feats[0], **kwargs)
        else:
            return self.fusion_sequ_block_test(bev_feats[0], **kwargs)

    def extract_bev_feat(self, img=None, points=None, **kwargs):
        """Extract and fuse single-frame camera + LiDAR BEV features."""
        image_feats, depth = self.extract_img_feat(img, **kwargs)
        point_feats = self.extract_pts_feat(points, None, **kwargs)
        fused = self.fusion_block(image_feats, point_feats)
        return fused, depth

    def extract_feat_default(self, points=None, img=None, img_metas=None, **kwargs):
        """Extract per-frame BEV features, then fuse them temporally.

        Returns:
            tuple: ``(bev_feats, depth, save_gt)``.
        """
        per_frame_feats, depth = self.extract_bev_feat(img, points, **kwargs)
        fused_feats, save_gt = self.fusion_sequ_block(per_frame_feats, **kwargs)
        return (fused_feats, depth, save_gt)

    def prepare_inputs_sequ(self, points, imgs):
        """Re-express camera extrinsics relative to the key ego pose.

        Pops the ego-to-global matrices out of ``imgs`` (in place) and
        rewrites ``imgs[1]`` / ``imgs[2]`` (rotation / translation) as
        sensor-to-key-ego transforms.
        """
        rots, trans = imgs[1:3]
        ego2globals = imgs.pop(-2)
        # assemble homogeneous sensor->ego matrices from R and t
        sensor2egos = torch.zeros_like(ego2globals)
        sensor2egos[..., :3, :3] = rots
        sensor2egos[..., :3, 3] = trans
        sensor2egos[..., 3, 3] = 1
        # reference pose: entry 0 along dim 1 — presumably the key camera/frame;
        # TODO confirm axis meaning against the dataloader
        keyego2global = ego2globals[:, 0, ...].unsqueeze(1)
        global2keyego = torch.inverse(keyego2global.double())
        sensor2keyegos = (global2keyego @ ego2globals.double()
                          @ sensor2egos.double()).float()
        imgs[1] = sensor2keyegos[..., :3, :3]
        imgs[2] = sensor2keyegos[..., :3, 3]
        return points, imgs

    def prepare_inputs(self, points, imgs):
        """Normalize multi-frame training inputs.

        Rewrites camera extrinsics relative to the key ego pose, splits every
        image tensor along the frame dimension, and regroups the flat point
        list into per-frame batches.
        """
        rots, trans = imgs[1:3]
        ego2globals = imgs.pop(-2)
        # assemble homogeneous sensor->ego matrices from R and t
        sensor2egos = torch.zeros_like(ego2globals)
        sensor2egos[..., :3, :3] = rots
        sensor2egos[..., :3, 3] = trans
        sensor2egos[..., 3, 3] = 1
        # reference pose: entry [-1, 0] — presumably (last frame, first camera);
        # TODO confirm axis ordering against the dataloader
        keyego2global = ego2globals[:, -1, 0, ...].unsqueeze(1).unsqueeze(1)
        global2keyego = torch.inverse(keyego2global.double())
        sensor2keyegos = (global2keyego @ ego2globals.double()
                          @ sensor2egos.double()).float()
        imgs[1] = sensor2keyegos[..., :3, :3]
        imgs[2] = sensor2keyegos[..., :3, 3]

        # split each tensor into per-frame slices, squeezing the frame dim
        imgs = [torch.split(t, 1, 1) for t in imgs]
        imgs = [[frame.squeeze(1) for frame in t] for t in imgs]

        # regroup the flat point list: [sample*frame] -> [frame][batch]
        B = len(points) // self.sequ_num
        points = [[points[i * self.sequ_num + k] for i in range(B)]
                  for k in range(self.sequ_num)]
        return points, imgs

    def update_bevfeat(self, timestamp, bevfeat, pose, token):
        """Push the newest frame into the fixed-length history buffers.

        The buffers behave like deques: the new entry goes to index 0 and
        the oldest entry is dropped. Any still-empty history slot is then
        seeded from the newest entry (with a zero feature map).
        """
        self.pre_timestamp = [timestamp] + self.pre_timestamp[:-1]
        self.pre_feat = [bevfeat] + self.pre_feat[:-1]
        self.pre_pose = [pose] + self.pre_pose[:-1]
        self.pre_token = [token] + self.pre_token[:-1]
        for slot in range(1, self.history_squ):
            if self.pre_feat[slot] is None:
                # cold start: fill empty history with the newest state
                self.pre_feat[slot] = torch.zeros_like(self.pre_feat[0])
                self.pre_timestamp[slot] = self.pre_timestamp[0]
                self.pre_pose[slot] = self.pre_pose[0]

    def extract_feat_sequ(self, points :List=None,
                        img :List[torch.Tensor]=None,
                        pose :torch.Tensor=None, 
                        timestamp: List[torch.Tensor]=None,
                        eps: float=1.0, 
                         **kwargs):
        """Streaming (inference) feature extraction with a 1-frame history.

        Extracts the current frame's BEV feature, fuses it with the cached
        previous frame (warped into the current frame when ``bev_align``),
        updates the history buffers, and encodes the result.

        Returns:
            tuple: ``([feats], [None], [None])`` matching the
            ``(feats, depth, save_gt)`` shape of ``extract_feat``.
        """
        points, img = self.prepare_inputs_sequ(points, img)
        mlp_input = self.img_view_transformer.get_mlp_input(*(img[1:]))
        img.append(mlp_input)
        bev_feat, depth = self.extract_bev_feat(img=img,
                                    points=points)

        bev_feat = bev_feat[0]
        token = kwargs['img_metas'][0]['sample_idx']
        diff_t = 1e5
        if self.pre_timestamp[0] is not None:
            diff_t = (timestamp[0][0] - self.pre_timestamp[0]).abs()
            diff_t = diff_t.to(bev_feat.dtype)
            assert diff_t >= 0, "infer timestamp error: dt < 0"
        # check contiguous frame: fuse with history only when the cached
        # frame carries the matching token
        if self.pre_token[0] == token:
            # bevfeat rnn mode
            Rpose = (self.pre_pose[0], pose)
            # bugfix: bev_tmp used to be unbound (NameError) when
            # self.bev_align is False; fall back to the raw cached feature
            bev_tmp = self.pre_feat[0]
            if self.bev_align:
                bev_tmp = self.align_feat(bev_tmp, Rpose[0], Rpose[1])[0]
            bev_feat = self.feat4d_fusion_layer([bev_tmp,
                                bev_feat],
                                diff_t=diff_t)
        else:
            # sequence break: fuse against a zero history with dt = 0
            bev_feat = self.feat4d_fusion_layer([torch.zeros_like(bev_feat),
                    bev_feat],
                    diff_t=torch.zeros(1,
                                       device=bev_feat.device,
                                       dtype=bev_feat.dtype))

        self.update_bevfeat(timestamp[0][0], bev_feat, pose, token)
        x = self.bev_encoder(bev_feat)
        return [x],[None],[None]

    def get_sequ(self, p=0.5):
        """Pick which frames of the sequence to fuse (training-time aug).

        With probability ``1 - p`` all frames are kept; otherwise the
        sequence is truncated to a random suffix. Outside training, for
        short sequences, or when history is disabled, all frames are kept.
        """
        full = [i for i in range(self.sequ_num)]
        if not self.training or self.sequ_num <= 2 or (not self.with_prev):
            return full
        if random.random() < 1 - p:
            # keep every frame (random skip-frame fusion left disabled)
            chosen = full
        else:
            # randomly drop a prefix: fuse only frames [start, sequ_num)
            start = random.randint(0, self.sequ_num - 2)
            chosen = [i for i in range(start, self.sequ_num)]
        chosen.sort()
        return chosen

    def extract_feat(self, points :List=None,
                        img :List[torch.Tensor]=None,
                         **kwargs):
        """Training-time multi-frame feature extraction and temporal fusion.

        Runs the camera/LiDAR pipeline per frame (gradients only through the
        key frame), warps and fuses the per-frame BEV features, then encodes
        the frames selected for supervision.

        Returns:
            tuple: ``([feats], [depth], backward_gt)`` where ``backward_gt``
            indexes the flattened (frame x batch) samples that receive
            supervision.
        """
        # sequence sort : 0 1 2 3
        #import pdb;pdb.set_trace()
        # choose which frames to fuse (random suffix-truncation augmentation)
        L4d_id = self.get_sequ(p=self.aug_p)
        points, img = self.prepare_inputs(points, img)

        sequ_num = len(L4d_id)
        points = [points[i] for i in L4d_id]
        img = [[img[i][f] for f in L4d_id] for i in range(len(img))]

        # pred_batch = np.random.randint(low=1,high=self.sequ_num-1)
        # frames whose encoded output is supervised (only the key frame)
        pred_batch = []
        if not self.training:
            pred_batch = []
        pred_batch.append(sequ_num-1)
        pred_batch = np.unique(pred_batch).tolist()
        pred_batch.sort()
        bev_feat_list,depth_list = [], []
        # iterate frames from newest to oldest; results are insert(0)-ed so
        # bev_feat_list ends up in chronological order
        for i in range(sequ_num):
            frame_id = sequ_num - i - 1
            inputs_curr = [s[frame_id] for s in img]
            # every frame reuses the key frame's extrinsics (slots 1 and 2)
            inputs_curr[1], inputs_curr[2] = img[1][-1], img[2][-1]
            points_curr = points[frame_id]
            # aug lidar
            # NOTE(review): this inner loop reuses the outer loop variable
            # ``i``; harmless in Python (the outer ``for`` re-assigns it each
            # pass) but worth renaming for clarity.
            for i in range(len(points_curr)):
                if np.random.rand() < self.aug_lidar[0]:
                    # NOTE(review): ``[:2]`` zeroes the first two *points*
                    # (rows); if the intent is zeroing x/y of all points it
                    # should be ``[:, :2]`` — confirm before changing.
                    points_curr[i][:2] *= 0.0
                elif np.random.rand() < self.aug_lidar[1]:
                    # drop a random tail of the cloud, then same zeroing
                    rand_id = np.random.randint(5, points_curr[i].shape[0])
                    points_curr[i] = points_curr[i][:rand_id]
                    points_curr[i][:2] *= 0.0
                    # points_curr[i]
            key_frame = (frame_id == sequ_num-1) # bugfix for training
            # key_frame = False
            # if self.detach:
            #     key_frame = (frame_id == sequ_num-1)
            # else:
            #     key_frame = (frame_id in pred_batch)
            if key_frame or self.with_prev:
                # todo add camera2lidar error
                mlp_input = self.img_view_transformer.get_mlp_input(*(inputs_curr[1:]))
                inputs_curr.append(mlp_input)
                if key_frame:
                    bev_feat, depth = self.extract_bev_feat(img=inputs_curr,
                                    points=points_curr)
                else: # only infer preframe for grad backwards only once
                    with torch.no_grad():
                        bev_feat, depth = self.extract_bev_feat(img=inputs_curr,
                                    points=points_curr)
            else:
                # history disabled: placeholder zeros for non-key frames
                bev_feat = [torch.zeros_like(bev_feat_list[0])]
                depth = None
            bev_feat_list.insert(0, bev_feat[0])
            depth_list.insert(0, depth)
        if self.with_prev:
            pose = kwargs['pose']
            if isinstance(kwargs['timestamp'],list):
                kwargs['timestamp'] = kwargs['timestamp'][0]
            timestamp = kwargs['timestamp'].view(-1,self.sequ_num)
            # restrict pose/timestamp to the frames actually kept
            pose = pose[:,L4d_id,:]
            timestamp = timestamp[:,L4d_id]
            # per-step time gaps fed to the fusion layer as diff_t
            diss_t = (timestamp[:,1:] - timestamp[:,:-1]).float()
            # # check contiguous frame2
            # for i in range(bev_feat_list[0].shape[0]):
            #     for j in range(self.sequ_num - 1):
            #         if diss[i, - j - 1] > 1.0:
            #             for k in range(j+1,self.sequ_num):
            #                 bev_feat_list[k][i][:] = 0
            #             break
            # align feat to curr
            # todo add sweep to gt error 
            if self.Nframe: # bevfeat Nframe cat mode
                if self.bev_align:
                    # warp every frame into the key (last) frame
                    for idx in range(self.sequ_num):
                        grid = self.align_pose_matrix((pose[:,-1-idx,...], pose[:,-1,...]))
                        bev_feat_list[idx] = self.feat_warp((grid, bev_feat_list[idx]))[0]

                bev_feat = self.feat4d_fusion_layer(bev_feat_list)
            else:           # bevfeat rnn mode
                # s_frame = np.random.randint(self.sequ_num-1)
                # if np.random.rand() < 0.0:
                #     s_frame = self.sequ_num-2
                # if not self.training:
                #     s_frame = 0
                # recurrent pass: fold each frame into the next one
                for frame_id in range(0, sequ_num-1):
                    Rpose = (pose[:,frame_id, ...], pose[:, frame_id+1, ...])
                    # NOTE(review): if self.bev_align is False, ``bev_tmp``
                    # is never assigned and the fusion call below raises
                    # NameError — bev_align is effectively required here.
                    if self.bev_align:
                        # aug bev noise
                        bev_mem = bev_feat_list[frame_id] 
                        if np.random.rand() < self.aug_sequ_noise[0]: # sequ noise aug
                            # multiplicative noise scaled by aug_sequ_noise[1]
                            bev_mem_tmp = bev_mem*(1+(torch.rand_like(bev_mem)-0.5) \
                                         / (0.5/self.aug_sequ_noise[1]))
                            bev_mem_tmp = bev_mem_tmp.clamp(min=0.0)
                        else:
                            bev_mem_tmp = bev_mem
                        bev_tmp = \
                            self.align_feat(bev_mem_tmp,
                                            Rpose[0], Rpose[1])[0]
                    # aug bevfeat: occasionally blank the key frame's feature
                    bev_feat_curr = bev_feat_list[frame_id + 1]
                    if (np.random.rand() < self.aug_bevfeat) and (frame_id==(sequ_num-2)):
                        bev_feat_curr = bev_feat_curr * 0 
                    bev_feat_list[frame_id + 1] = \
                        self.feat4d_fusion_layer([bev_tmp,
                                     bev_feat_curr],
                                     diff_t=diss_t[:,frame_id])
                bev_feat = bev_feat_list
        else:
            # no history: fuse the last two entries with identity poses
            Rpose = torch.eye(4).cuda().unsqueeze(0).repeat(len(bev_feat_list[0]), 1, 1)
            Rpose = [Rpose, Rpose]
            bev_feat_list[-1] = self.feat4d_fusion_layer([bev_feat_list[-2], bev_feat_list[-1]], Rpose)
            bev_feat = bev_feat_list
        batch_size = bev_feat[0].shape[0]
        # pred_batch = [i for i in range(self.sequ_num)]
        # pred_batch = [self.sequ_num - 5]
        # stack only the supervised frames and run the BEV encoder on them
        bev_feat = torch.stack([bev_feat_list[i] for i in pred_batch], 1).contiguous()
        B,S,C,H,W = bev_feat.shape
        x = self.bev_encoder(bev_feat.view(-1, C, H, W)) #.view(B,S,-1,H,W)
        # backward_gt = [i * self.sequ_num + self.sequ_num - 1 for i in range(bev_feat.shape[0])]
        # map supervised frames back to flattened (frame x batch) GT indices
        backward_gt = []
        for b in pred_batch:
            backward_gt.extend([i * self.sequ_num + L4d_id[b] for i in range(batch_size)])
        backward_gt.sort()
        # print(backward_gt)
        # shape = depth_list[0][0].shape
        depth = torch.stack([depth_list[i][0] for i in pred_batch], 1)
        depth = depth.view(-1,*depth.shape[2:])
        return [x], [depth], backward_gt

    def filter_mask(self, gt):
        """Indices of samples whose GT is not the single ``-1000`` sentinel."""
        return [idx for idx, item in enumerate(gt)
                if not (len(item) == 1 and torch.all(item == -1000))]
    def update_task_(self, losses, task_):
        """Add ``task_`` onto the first loss entry (insertion order), in place."""
        keys = list(losses)
        if keys:
            losses[keys[0]] += task_
        return losses
    def losses_sum(self, losses):
        """Sum every entry of a loss dict into a single value (0 if empty)."""
        return sum(losses.values(), 0)
    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function.

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
                Defaults to None.
            img_metas (list[dict], optional): Meta information of each sample.
                Defaults to None.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes. Defaults to None.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes. Defaults to None.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images. Defaults to None.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
                images. Defaults to None.
            img_inputs (list, optional): Multi-frame image inputs.
            proposals ([list[torch.Tensor], optional): Predicted proposals
                used for training Fast RCNN. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored. Defaults to None.

        Returns:
            dict: Losses of different branches.
        """
        fusion_feat, depth, save_gt = self.extract_feat(
            points, img=img_inputs, **kwargs)

        gt_depth = kwargs.get('gt_depth', None)
        if gt_depth is not None:
            shape = gt_depth.shape
            # keep only the depth maps of the frames that receive gradients
            gt_depth = gt_depth.view(-1, *shape[2:])[save_gt].contiguous()
            loss_depth = self.img_view_transformer.get_depth_loss(gt_depth, depth[0])
            losses = dict(loss_depth=loss_depth)
        else:
            losses = dict()  # no depth supervision configured
        if hasattr(self, "pts_bbox_head"):
            gt_bboxes_3d = [gt_bboxes_3d[i] for i in save_gt]
            gt_labels_3d = [gt_labels_3d[i] for i in save_gt]

            box_mask = self.filter_mask(gt_labels_3d)
            # zero out features of samples without valid boxes instead of
            # dropping them, so the batch shape stays constant
            fusion_feat_box = fusion_feat[0].clone()
            for r in range(len(gt_bboxes_3d)):
                if r not in box_mask:
                    fusion_feat_box[r][:] = 0
            # bugfix: losses_pts could be referenced while unbound when the
            # filtered GT list was empty
            losses_pts = dict()
            if len(gt_labels_3d) > 0:
                losses_pts = self.forward_pts_train([fusion_feat_box], gt_bboxes_3d,
                                                    gt_labels_3d, img_metas,
                                                    gt_bboxes_ignore)
                pts_w = self.train_cfg.pts.get("loss_weight", 1.0)
                # update_task_weight is expected from the parent class — verify
                self.update_task_weight(losses_pts, pts_w)
            losses.update(losses_pts)
        if hasattr(self, "map_head"):
            kwargs['gt_maps_3d'] = [kwargs['gt_maps_3d'][i] for i in save_gt]
            maps_mask = self.filter_mask(kwargs['gt_maps_3d'])
            fusion_feat_maps = fusion_feat[0].clone()
            # NOTE(review): iterates len(gt_bboxes_3d) rather than
            # len(gt_maps_3d); both are save_gt-sized when the box head
            # exists, but gt_maps_3d would be the clearer bound — confirm
            # before changing
            for r in range(len(gt_bboxes_3d)):
                if r not in maps_mask:
                    fusion_feat_maps[r][:] = 0
            # bugfix: loss_map could be referenced while unbound when the
            # filtered GT list was empty
            loss_map = dict()
            if len(kwargs['gt_maps_3d']) > 0:
                loss_map = self.map_head.forward_pts_train([fusion_feat_maps],
                                                           kwargs["gt_maps_3d"],
                                                           img_metas)
                map_w = self.train_cfg.maps.get("loss_weight", 1.0)
                self.update_task_weight(loss_map, map_w)
            losses.update(loss_map)
        return losses
    
    def get_num_list(self, v, num_list=0):
        """Count how deeply ``v`` is wrapped in single-element lists."""
        depth = num_list
        while isinstance(v, list) and len(v) == 1:
            depth += 1
            v = v[0]
        return depth

    def get_list_from_num(self, v, num_list=0):
        """Unwrap ``v`` by indexing ``[0]`` ``num_list`` times.

        Equivalent to ``v[0][0]...`` with ``num_list`` subscripts.
        """
        # cleanup: previously built a "v[0][0]..." string and ran it through
        # eval(); plain indexing is equivalent and avoids eval entirely
        for _ in range(num_list):
            v = v[0]
        return v

    def get_from_list(self, v):
        """Fully unwrap nested single-element lists around ``v``."""
        depth = self.get_num_list(v)
        return self.get_list_from_num(v, depth)

    def forward_test(self,
                     points=None,
                     img_metas=None,
                     img_inputs=None,
                     **kwargs):
        """Run inference on a single sample (no test-time augmentation).

        Args:
            points (list[torch.Tensor]): outer list indexes test-time
                augmentations; inner tensors hold the point clouds.
            img_metas (list[list[dict]]): outer list indexes test-time
                augmentations; inner list indexes images in a batch.
            img_inputs (list[torch.Tensor], optional): outer list indexes
                test-time augmentations; inner tensors hold images.

        Raises:
            TypeError: if ``img_inputs`` or ``img_metas`` is not a list.
            ValueError: if their augmentation counts disagree.
        """
        # Both containers must be lists of test-time augmentations.
        for var, name in [(img_inputs, 'img_inputs'),
                          (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))

        if len(img_inputs) != len(img_metas):
            raise ValueError(
                'num of augmentations ({}) != num of image meta ({})'.format(
                    len(img_inputs), len(img_metas)))

        # Strip the aug/batch wrappers the test dataloader adds. The points
        # and timestamp unwrapping is identical in both modes; only the
        # meta/image unwrapping depends on sequential mode.
        points = [frame[0] for frame in points[0][0]]
        kwargs['timestamp'] = kwargs['timestamp'][0]
        if self.sequ_mode:
            img_metas = [frame[0] for frame in img_metas[0][0]]
            img_inputs = [frame[0] for frame in img_inputs[0]]
        else:
            img_metas = img_metas[0]
            img_inputs = img_inputs[0]

        return self.simple_test(points, img_metas, img_inputs, **kwargs)

    def aug_test(self, points, img_metas, img=None, rescale=False):
        """Test with augmentation — intentionally unsupported.

        Raises:
            NotImplementedError: always; this detector only supports
                ``simple_test``.
        """
        # FIX: `assert False` is stripped under `python -O`, which would make
        # this silently return None; raise an explicit error instead.
        raise NotImplementedError(
            'aug_test is not supported by BEVDet4d_Fusion')

    def simple_test(self,
                    points,
                    img_metas,
                    img=None,
                    rescale=False,
                    **kwargs):
        """Test function without augmentation.

        Extracts the fused BEV feature once and feeds it to whichever heads
        are attached (box head and/or map head).

        Args:
            points: point-cloud inputs for the LiDAR branch.
            img_metas (list[dict]): per-sample meta information.
            img: image inputs for the camera branch. Defaults to None.
            rescale (bool): whether heads rescale results. Defaults to False.

        Returns:
            list[dict]: one result dict per sample, with ``'pts_bbox'``
            and/or ``'maps'`` entries depending on attached heads.
        """
        # FIX: removed an `if 0:` dead debug block (dumped tensors to ./tmp)
        # and an unused `losses = dict()` local.
        if not self.sequ_mode:
            fusion_feat, _, _ = self.extract_feat(
                points, img=img, img_metas=img_metas, **kwargs)
        else:
            # Sequential mode aligns BEV features across frames.
            fusion_feat, _, _ = self.extract_feat_sequ(
                points, img=img, img_metas=img_metas, **kwargs)
        bbox_list = [dict() for _ in range(len(img_metas))]
        if hasattr(self, "pts_bbox_head"):
            bbox_pts = self.simple_test_pts(fusion_feat,
                                            img_metas,
                                            rescale=rescale)
            for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
                result_dict['pts_bbox'] = pts_bbox
        if hasattr(self, "map_head"):
            maps_bev = self.map_head.simple_test_maps(fusion_feat,
                                                      img_metas,
                                                      rescale=rescale)
            for result_dict, maps in zip(bbox_list, maps_bev):
                result_dict['maps'] = maps
        return bbox_list


    def forward_dummy(self, img=None):
        """Dummy forward pass for deployment tracing / FLOPs counting.

        ``img`` packs, in order: image, pose1, pose2, bev_pose, bev_feat,
        lidar; the two pose entries at ``img[3:5]`` drive the fusion block
        in deploy mode.

        Returns:
            list: head outputs (box head first if present, then map head)
            followed by the fused BEV feature.
        """
        img_feats, pts_feats, _ = self.extract_feat_dummy(img)
        fused = self.fusion_block(
            img_feats[0], pts_feats[0], img[3:5], mode='deploy')

        outputs = []
        if hasattr(self, "pts_bbox_head"):
            outputs.append(self.pts_bbox_head(fused))
        if hasattr(self, "map_head"):
            outputs.append(self.map_head(fused))
        outputs.append(fused)
        return outputs

    def N21(self, x):
        """Collapse one level of nesting from a batched test-time input.

        Args:
            x: either a list whose first element is a list (each inner
               element's first item is collected), or a ``torch.Tensor``.

        Returns:
            A flat list of first items, or the tensor reshaped to 1-D.

        Raises:
            TypeError: for any other input type.
        """
        if isinstance(x[0], list):
            y = []
            for p in x:
                y.extend(i[0] for i in p)
            return y
        elif isinstance(x, torch.Tensor):
            return x.reshape(-1)
        else:
            # FIX: was a bare `raise` with no active exception, which fails
            # with an opaque "No active exception to re-raise" RuntimeError.
            raise TypeError(
                'N21 expects a nested list or a torch.Tensor, '
                'got {}'.format(type(x)))
    def input_reshape(self, **kwargs):
        """Collapse the leading (sequence/aug) dimension of training inputs.

        Image-input tensors are reshaped in place inside their list; every
        other entry (except the skip keys) is flattened via ``N21``.

        Returns:
            dict: the same kwargs dict with reshaped values.
        """
        skip_keys = ('return_loss', 'rescale', 'gt_depth')
        for key, value in kwargs.items():
            if key in skip_keys:
                continue
            if key == 'img_inputs':
                # Mutate the list in place so aliased references stay valid.
                imgs = kwargs['img_inputs']
                for idx, tensor in enumerate(imgs):
                    imgs[idx] = tensor.view(-1, *tensor.shape[1:])
            else:
                kwargs[key] = self.N21(value)

        return kwargs
    def forward(self, *args, **kwargs):
        """Dispatch between train/test (kwargs path) and deploy (args path)."""
        if args:
            # Deployment/tracing entry point: a single packed input.
            return self.forward_dummy(img=args[0])
        if kwargs.get('return_loss', True):
            # Training: flatten batched inputs, then split the ego pose off
            # the end of the image-input list.
            kwargs = self.input_reshape(**kwargs)
            kwargs['pose'] = kwargs['img_inputs'].pop(-1)
        else:
            # Testing: the pose sits at the end of the first aug entry.
            kwargs['pose'] = kwargs['img_inputs'][0].pop(-1)
        return super().forward(**kwargs)

@DETECTORS.register_module()
class BEVDet4d_Fusion_Pose(BEVDet4d_Fusion):
    def __init__(self,**kwargs):
        # NOTE(review): pure pass-through to BEVDet4d_Fusion — adds no new
        # config keys; kept so the registered class has an explicit ctor.
        super().__init__(**kwargs)
    
    def extract_img_feat(self, img, img_metas=None, **kwargs):
        """Lift multi-view images into BEV space.

        Args:
            img: list whose first entry is the image tensor and the rest are
                view-transformer inputs; None disables the camera branch.
            img_metas: unused here, kept for interface compatibility.

        Returns:
            tuple: ``(img_feats, x)`` — a list of BEV feature maps and the
            raw image-encoder output (``([None], [None])`` when the camera
            branch is disabled).
        """
        if img is None or not self.with_img:
            return [None], [None]
        x = self.image_encoder(img[0])
        bev_feat, _depth = self.img_view_transformer([x] + img[1:])
        img_feats = [bev_feat]
        if self.pre_process_net:
            # Pre-process net returns a list of refined BEV features.
            img_feats = self.pre_process_net(img_feats[0])
        return img_feats, x
    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function.

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
            img_metas (list[dict], optional): Meta information of each sample.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes.
            img_inputs: Images plus view-transformer inputs of each sample.
            proposals (list[torch.Tensor], optional): Predicted proposals.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored.

        Returns:
            dict: Losses of the attached branches (depth, boxes, maps).
        """
        # save_gt indexes the samples whose ground truth survives extraction.
        fusion_feat, depth, save_gt = self.extract_feat(
            points, img=img_inputs, **kwargs)

        gt_depth = kwargs.get('gt_depth', None)
        if gt_depth is not None:
            shape = gt_depth.shape
            gt_depth = gt_depth.view(-1, *shape[2:])[save_gt].contiguous()
            loss_depth = self.img_view_transformer.get_depth_loss(
                gt_depth, depth[0])
            losses = dict(loss_depth=loss_depth)
        else:
            losses = dict()

        if hasattr(self, "pts_bbox_head"):
            gt_bboxes_3d = [gt_bboxes_3d[i] for i in save_gt]
            gt_labels_3d = [gt_labels_3d[i] for i in save_gt]

            box_mask = self.filter_mask(gt_labels_3d)
            # Zero out features of samples without valid boxes instead of
            # dropping them, so the batch size stays constant.
            fusion_feat_box = fusion_feat[0].clone()
            for r in range(len(gt_bboxes_3d)):
                if r not in box_mask:
                    fusion_feat_box[r][:] = 0
            if len(gt_labels_3d) > 0:
                losses_pts = self.forward_pts_train([fusion_feat_box],
                                                    gt_bboxes_3d,
                                                    gt_labels_3d, img_metas,
                                                    gt_bboxes_ignore)
                pts_w = self.train_cfg.pts.get("loss_weight", 1.0)
                self.update_task_weight(losses_pts, pts_w)
                # FIX: update inside the guard; previously `losses_pts` was
                # referenced even when no labels survived, raising NameError.
                losses.update(losses_pts)

        if hasattr(self, "map_head"):
            kwargs['gt_maps_3d'] = [kwargs['gt_maps_3d'][i] for i in save_gt]
            maps_mask = self.filter_mask(kwargs['gt_maps_3d'])
            fusion_feat_maps = fusion_feat[0].clone()
            # FIX: iterate over the filtered map targets, not gt_bboxes_3d —
            # the latter is only filtered when a pts_bbox_head exists.
            for r in range(len(kwargs['gt_maps_3d'])):
                if r not in maps_mask:
                    fusion_feat_maps[r][:] = 0
            if len(kwargs['gt_maps_3d']) > 0:
                loss_map = self.map_head.forward_pts_train(
                    [fusion_feat_maps],
                    kwargs["gt_maps_3d"],
                    img_metas)
                map_w = self.train_cfg.maps.get("loss_weight", 1.0)
                self.update_task_weight(loss_map, map_w)
                # FIX: same NameError guard as the box branch above.
                losses.update(loss_map)

        return losses
