import torch
import torch.nn as nn
from .roi_head_template import RoIHeadTemplate
from ...utils import common_utils, spconv_utils
from ...ops.pointnet2.pointnet2_stack import voxel_pool_modules as voxelpool_stack_modules
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from functools import partial
import pickle
import copy

from pcdet.datasets.augmentor.X_transform import X_TRANS
from .target_assigner.proposal_target_layer_ted import ProposalTargetLayer
from ...utils import loss_utils
from ...utils.bbloss import bb_loss


class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding (Transformer-XL style).

    Produces a (seq_len, batch, demb) embedding whose first demb/2 channels
    are sin terms and last demb/2 channels are cos terms.
    """

    def __init__(self, demb=256):
        super(PositionalEmbedding, self).__init__()

        self.demb = demb

        # Inverse frequencies 1 / 10000^(2k / demb), one per sin/cos pair.
        freqs = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', freqs)

    def forward(self, pos_seq, batch_size=2):
        """Embed a 1-D position sequence; broadcast over batch when given."""
        angles = torch.outer(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)

        emb = emb[:, None, :]
        if batch_size is None:
            return emb
        return emb.expand(-1, batch_size, -1)

class CrossAttention(nn.Module):
    """Multi-head cross attention with an external query.

    When ``pos`` is enabled, a sinusoidal positional code (pos_dim channels)
    is concatenated to both the key/value sequence and the query before the
    Q/K/V projections. Inputs are (N, B, C) as expected by
    ``nn.MultiheadAttention``.
    """

    def __init__(self, hidden_dim, pos = True, head = 4):
        super(CrossAttention, self).__init__()

        self.hidden_dim = hidden_dim
        self.pos_dim = 8
        self.pos = pos

        proj_in = hidden_dim + self.pos_dim if pos else hidden_dim
        if self.pos:
            self.pos_en = PositionalEmbedding(self.pos_dim)

        self.Q_linear = nn.Linear(proj_in, hidden_dim, bias=False)
        self.K_linear = nn.Linear(proj_in, hidden_dim, bias=False)
        self.V_linear = nn.Linear(proj_in, hidden_dim, bias=False)

        self.att = nn.MultiheadAttention(hidden_dim, head)

    def forward(self, inputs, Q_in):  # inputs: (N, B, C); Q_in: (1, B, C)
        seq_len = inputs.shape[0]
        batch_size = inputs.shape[1]

        if self.pos:
            # Keys get positions 1..N; the query shares position N with the
            # last key.
            key_pos = self.pos_en(torch.from_numpy(np.arange(seq_len) + 1).cuda(), batch_size)
            query_pos = self.pos_en(torch.from_numpy(np.array([seq_len])).cuda(), batch_size)
            kv_feat = torch.cat([inputs, key_pos], -1)
            q_feat = torch.cat([Q_in, query_pos], -1)
        else:
            kv_feat = inputs
            q_feat = Q_in

        attn_out, _ = self.att(self.Q_linear(q_feat),
                               self.K_linear(kv_feat),
                               self.V_linear(kv_feat))
        return attn_out

class Attention_Layer(nn.Module):
    """Single-head self-attention over a short sequence, mean-pooled.

    Input: (B, T, C); output: (B, C) — the attention output averaged over T.
    """

    def __init__(self, hidden_dim):
        super(Attention_Layer, self).__init__()

        self.hidden_dim = hidden_dim

        self.Q_linear = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.K_linear = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.V_linear = nn.Linear(hidden_dim, hidden_dim, bias=False)

    def forward(self, inputs):  # B*160,1/2/3,256
        queries = self.Q_linear(inputs)
        keys_t = self.K_linear(inputs).permute(0, 2, 1)
        values = self.V_linear(inputs)

        # Unscaled dot-product attention, normalized over the key dimension.
        weights = F.softmax(torch.matmul(queries, keys_t), dim=2)
        attended = torch.matmul(weights, values)

        # Collapse the sequence dimension by averaging.
        return torch.mean(attended, -2)

def gen_sample_grid(rois, grid_size=7, grid_offsets=(0, 0), spatial_scale=1.):
    """Generate per-ROI BEV sampling grids in (scaled) image coordinates.

    Args:
        rois: (N, 7+) boxes [x, y, z, dx, dy, dz, heading, ...].
        grid_size: samples per ROI side (grid_size**2 points per ROI).
        grid_offsets: (x, y) offsets added before scaling.
        spatial_scale: world-to-feature-map scale factor.

    Returns:
        xs, ys: each of shape (grid_size**2, N).
    """
    num_rois = rois.shape[0]
    # Integer lattice of all (row, col) cells, shared by every ROI.
    lattice = rois.new_ones((grid_size, grid_size)).nonzero()  # (g*g, 2)
    idx = lattice.repeat(num_rois, 1, 1).float()  # (N, g*g, 2)

    # Scale lattice indices into each ROI's local (dx, dy) extent, centered.
    # Divides by (grid_size - 1): samples span the box edge-to-edge.
    sizes = rois.view(num_rois, -1)[:, 3:5].unsqueeze(dim=1)
    local_pts = idx / (grid_size - 1) * sizes - sizes / 2  # (N, g*g, 2)

    # Lift to 3D (constant z) so the shared z-rotation helper applies.
    local_pts = torch.cat([local_pts, torch.ones_like(local_pts[..., 0:1])], -1)

    global_pts = common_utils.rotate_points_along_z(
        local_pts.clone(), rois[:, 6]
    ).squeeze(dim=1)
    global_pts = global_pts + rois[:, 0:3].clone().unsqueeze(dim=1)

    xs = (global_pts[..., 0:1].permute(1, 2, 0).contiguous() + grid_offsets[0]) * spatial_scale
    ys = (global_pts[..., 1:2].permute(1, 2, 0).contiguous() + grid_offsets[1]) * spatial_scale

    return xs.view(grid_size ** 2, -1), ys.view(grid_size ** 2, -1)

def bilinear_interpolate_torch_gridsample(image, samples_x, samples_y):
    """Bilinearly sample ``image`` (C, H, W) at per-channel (x, y) locations.

    Args:
        image: (C, H, W) feature map; each channel is sampled independently.
        samples_x, samples_y: (C, K) pixel coordinates.

    Returns:
        grid_sample output of shape (C, 1, K, 1).
    """
    C, H, W = image.shape

    # Assemble a (C, K, 1, 2) grid of (x, y) pairs.
    grid = torch.cat([samples_x.unsqueeze(2).unsqueeze(3),
                      samples_y.unsqueeze(2).unsqueeze(3)], 3)

    # Normalize pixel coordinates to grid_sample's [-1, 1] range
    # (plain x/W, y/H scaling, then affine map to [-1, 1]).
    grid[:, :, :, 0] = grid[:, :, :, 0] / W
    grid[:, :, :, 1] = grid[:, :, :, 1] / H
    grid = grid * 2 - 1

    # Treat channels as the batch dim: (C, 1, H, W) sampled with (C, K, 1, 2).
    return torch.nn.functional.grid_sample(image.unsqueeze(1), grid, align_corners=False)

class TEDSHead(RoIHeadTemplate):
    def __init__(self, input_channels, model_cfg, point_cloud_range=None, voxel_size=None, num_class=1,
                 **kwargs):
        """TED second-stage (refinement) head.

        Builds one neighbor-voxel set-abstraction pooling layer per configured
        feature source, three Conv3d towers (thr_block1/2/3) that compress
        pooled ROI grids, and FC branches for classification and regression.

        Args:
            input_channels: mapping feature-source name -> input channel count.
            model_cfg: head config node (ROI_GRID_POOL, SHARED_FC, CLS_FC,
                REG_FC, DP_RATIO, TARGET_CONFIG, ...).
            point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].
            voxel_size: [vx, vy, vz] of the base voxel grid.
            num_class: number of detection classes.

        NOTE(review): `Bottleneck` and `Cbam` are not defined or imported in
        this file's visible portion — confirm they are provided elsewhere.
        """
        super().__init__(num_class=num_class,  model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.pool_cfg = model_cfg.ROI_GRID_POOL
        LAYER_cfg = self.pool_cfg.POOL_LAYERS
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.rot_num = 1
        self.x_trans_train = X_TRANS(model_cfg = model_cfg)
        self.forward_ret_dict = {}

        c_out = 0
        self.roi_grid_pool_layers = nn.ModuleList()
        # One pooling layer per configured feature source.
        for src_name in self.pool_cfg.FEATURES_SOURCE:
            mlps = LAYER_cfg[src_name].MLPS
            for k in range(len(mlps)):
                mlps[k] = [input_channels[src_name]] + mlps[k]
            pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
                query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
                nsamples=LAYER_cfg[src_name].NSAMPLE,
                radii=LAYER_cfg[src_name].POOL_RADIUS,
                mlps=mlps,
                pool_method=LAYER_cfg[src_name].POOL_METHOD,
            )

            self.roi_grid_pool_layers.append(pool_layer)

            # FIXME: c_out accumulates over ALL sources; the `// 3` below
            # compensates (presumably the sources come in triples — confirm).
            c_out += sum([x[-1] for x in mlps])


        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE

        # pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out // 3
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU(inplace=True)
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]

            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))


        # proposal target layer setup
        self.proposal_target_layers = []
        for i in range(6):
            # No rotation-specific settings; all 6 layers share the same config.
            this_cfg = copy.deepcopy(self.model_cfg.TARGET_CONFIG)
            proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=this_cfg)
            self.proposal_target_layers.append(proposal_target_layer)

        # FIXME: shared FC tower currently disabled (shared_fc_list is unused).
        # self.shared_fc_layers=nn.Sequential(*shared_fc_list)
        # self.shared_fc_layers1 = copy.deepcopy(self.shared_fc_layers)
        # self.shared_fc_layers2 = copy.deepcopy(self.shared_fc_layers)

        # TODO: 8*32
        # Bottleneck hyper-parameters (see thr_block* below).
        alpha = 4
        beta = 1
        stride = 1

        # downsample11 = nn.Sequential(nn.Conv3d(64, 128,
        #                            kernel_size=1, stride=1, bias=False),
        #                            nn.BatchNorm3d(128))

        # downsample21 = nn.Sequential(nn.Conv3d(64, 128,
        #                            kernel_size=1, stride=1, bias=False),
        #                            nn.BatchNorm3d(128))

        # downsample31 = nn.Sequential(nn.Conv3d(64, 128,
        #                            kernel_size=1, stride=1, bias=False),
        #                            nn.BatchNorm3d(128))

        # 1x1x1 projections (128 -> 256) used as residual shortcuts in the
        # final, channel-expanding Bottleneck of each tower.
        downsample13 = nn.Sequential(nn.Conv3d(128, 256,
                                   kernel_size=1, stride=1, bias=False),
                                   nn.BatchNorm3d(256))

        downsample23 = nn.Sequential(nn.Conv3d(128, 256,
                                   kernel_size=1, stride=1, bias=False),
                                   nn.BatchNorm3d(256))

        downsample33 = nn.Sequential(nn.Conv3d(128, 256,
                                   kernel_size=1, stride=1, bias=False),
                                   nn.BatchNorm3d(256))

        # Three parallel Conv3d towers; each alternates Bottleneck blocks with
        # separable (1,3,3)/(3,1,1) convolutions and expands 128 -> 256
        # channels in its last Bottleneck.
        self.thr_block1 = nn.Sequential(
            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=2, downsample=downsample13),
            nn.Conv3d(256,256,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256,256,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            )

        self.thr_block2 = nn.Sequential(
            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=2, downsample=downsample23),
            nn.Conv3d(256,256,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256,256,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            )

        self.thr_block3 = nn.Sequential(
            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=1, downsample=None),
            nn.Conv3d(128,128,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128,128,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),

            Bottleneck(128, 128, alpha, beta, stride, expansion=2, downsample=downsample33),
            nn.Conv3d(256,256,kernel_size=(1,3,3),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            nn.Conv3d(256,256,kernel_size=(3,1,1),bias=False),
            nn.BatchNorm3d(256),
            nn.ReLU(inplace=True),
            )


        # self.thr_block1 = nn.Sequential(
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,256,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(256),
        #                 nn.ReLU(inplace=True))

        # Channel/spatial attention (CBAM) modules, one per tower.
        self.thr_datt1 = Cbam(64)

        # self.thr_block2 = nn.Sequential(
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,256,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(256),
        #                 nn.ReLU(inplace=True))

        self.thr_datt2 = Cbam(64)

        # self.thr_block3 = nn.Sequential(
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 NONLocalBlock3D(in_channels=128),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,128,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(128),
        #                 nn.ReLU(inplace=True),
        #                 nn.Conv3d(128,256,kernel_size=3,bias=False),
        #                 nn.BatchNorm3d(256),
        #                 nn.ReLU(inplace=True))

        self.thr_datt3 = Cbam(64)


        # At this point pre_channel == SHARED_FC[-1] (set by the loop above).
        self.shared_channel = pre_channel

        # FIXME: single-branch head input (the *2 variant is disabled).
        # pre_channel = self.model_cfg.SHARED_FC[-1] * 2
        pre_channel = self.model_cfg.SHARED_FC[-1]

        cls_fc_list = []
        for k in range(0, self.model_cfg.CLS_FC.__len__()):
            cls_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.CLS_FC[k]

            if k != self.model_cfg.CLS_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))

        cls_fc_list.append(nn.Linear(pre_channel, self.num_class, bias=True))
        self.cls_layers=nn.Sequential(*cls_fc_list)

        # FIXME: single-branch head input (the *2 variant is disabled).
        # pre_channel = self.model_cfg.SHARED_FC[-1] * 2
        pre_channel = self.model_cfg.SHARED_FC[-1]
        reg_fc_list = []
        for k in range(0, self.model_cfg.REG_FC.__len__()):
            reg_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.REG_FC[k]

            if k != self.model_cfg.REG_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))

        reg_fc_list.append(nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True))
        reg_fc_layers = nn.Sequential(*reg_fc_list)
        self.reg_layers=reg_fc_layers

        # FIXME: cross-attention fusion currently disabled.
        # self.cross_attention_layers = Attention_Layer(self.shared_channel)

        self.init_weights()
        # Per-refinement-stage IoU bookkeeping.
        self.ious = {0: [], 1: [], 2: [], 3: []}

    def __init__org(self, input_channels, model_cfg, point_cloud_range=None, voxel_size=None, num_class=1,
                 **kwargs):
        """Legacy constructor variant, kept for reference.

        NOTE(review): this method is never called automatically (only
        ``__init__`` is); it appears to be dead code retained during
        refactoring — confirm and remove.
        """
        super().__init__(num_class=num_class,  model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.pool_cfg = model_cfg.ROI_GRID_POOL
        LAYER_cfg = self.pool_cfg.POOL_LAYERS
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        self.rot_num =  1
        self.x_trans_train = X_TRANS()

        c_out = 0
        self.roi_grid_pool_layers = nn.ModuleList()


        # FIXME: only the first three feature sources get pooling layers here.
        # for src_name in self.pool_cfg.FEATURES_SOURCE:
        for src_name in self.pool_cfg.FEATURES_SOURCE[:3]:
            mlps = LAYER_cfg[src_name].MLPS
            for k in range(len(mlps)):
                mlps[k] = [input_channels[src_name]] + mlps[k]
            pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
                query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
                nsamples=LAYER_cfg[src_name].NSAMPLE,
                radii=LAYER_cfg[src_name].POOL_RADIUS,
                mlps=mlps,
                pool_method=LAYER_cfg[src_name].POOL_METHOD,
            )

            self.roi_grid_pool_layers.append(pool_layer)

            c_out += sum([x[-1] for x in mlps])

        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE

        # pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
        # FIXME: divides by 3 to get per-source channels.
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out // 3
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU(inplace=True)
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]

            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))


        # Three shared-FC towers (one per refinement branch), identical init.
        self.shared_fc_layers = nn.Sequential(*shared_fc_list)
        self.shared_fc_layers1 = copy.deepcopy(self.shared_fc_layers)
        self.shared_fc_layers2 = copy.deepcopy(self.shared_fc_layers)

        self.shared_channel = pre_channel

        pre_channel = self.model_cfg.SHARED_FC[-1]

        cls_fc_list = []
        for k in range(0, self.model_cfg.CLS_FC.__len__()):
            cls_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.CLS_FC[k]

            if k != self.model_cfg.CLS_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))

        cls_fc_list.append(nn.Linear(pre_channel, self.num_class, bias=True))
        self.cls_layers=nn.Sequential(*cls_fc_list)

        pre_channel = self.model_cfg.SHARED_FC[-1]
        reg_fc_list = []
        for k in range(0, self.model_cfg.REG_FC.__len__()):
            reg_fc_list.extend([
                nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
                nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.REG_FC[k]

            if k != self.model_cfg.REG_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))

        reg_fc_list.append(nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True))
        reg_fc_layers = nn.Sequential(*reg_fc_list)
        self.reg_layers=reg_fc_layers

        # FIXME: attention fusion disabled.
        # self.cross_attention_layers = Attention_Layer(self.shared_channel)
        # self.cross_attention_layers = CrossAttention(self.shared_channel)


        self.init_weights()
        self.ious = {0: [], 1: [], 2: [], 3: []}

    def init_weights(self):
        """Xavier-init every Linear in the cls/reg heads, then re-init the
        final (prediction) layer of each head to N(0, 0.01) with zero bias."""
        heads = (self.cls_layers, self.reg_layers)

        for head in heads:
            for module in head.modules():
                if isinstance(module, nn.Linear):
                    nn.init.xavier_normal_(module.weight)
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)

        # The output layers get a tighter normal init (overrides the xavier
        # init applied above).
        for head in heads:
            nn.init.normal_(head[-1].weight, 0, 0.01)
            nn.init.constant_(head[-1].bias, 0)

    def roi_grid_pool_org(self, batch_dict, i):
        """Pool 3D voxel features around ROI grid points for rotation ``i``,
        over all configured feature sources.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4)  [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
            i: rotation index; 0 selects the un-suffixed feature keys.
        Returns:
            (BxN, GRID_SIZE**3, C_total) features concatenated over sources.
        """

        if i==0:
            rot_num_id = ''
        else:
            rot_num_id = str(i)

        rois = batch_dict['rois'].clone()

        batch_size = batch_dict['batch_size']
        with_vf_transform = batch_dict.get('with_voxel_feature_transform', False) # False

        # Dense grid points inside every ROI, in world coordinates.
        roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
            rois, grid_size=self.pool_cfg.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        # roi_grid_xyz: (B, Nx6x6x6, 3)
        roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)

        # compute the voxel coordinates of grid points
        roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
        roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
        roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
        # roi_grid_coords: (B, Nx6x6x6, 3)
        roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)

        batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
        for bs_idx in range(batch_size):
            batch_idx[bs_idx, :, 0] = bs_idx
        # roi_grid_coords: (B, Nx6x6x6, 4)
        # roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1)
        # roi_grid_coords = roi_grid_coords.int()
        roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])

        pooled_features_list = []


        for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE):
            pool_layer = self.roi_grid_pool_layers[k]
            if src_name in ['x_conv1', 'x_conv2', 'x_conv3', 'x_conv4']:

                cur_stride = batch_dict['multi_scale_3d_strides'][src_name]

                # Fall back to the nearest earlier rotation whose features
                # exist. NOTE(review): when j reaches 0 this probes key
                # '...0', but rotation 0 is stored under the un-suffixed key —
                # confirm the fallback can actually reach 0 in practice.
                j=i
                while 'multi_scale_3d_features'+rot_num_id not in batch_dict:
                    j-=1
                    rot_num_id = str(j)

                # NOTE(review): this assignment is dead — it is always
                # overwritten by the if/else immediately below.
                cur_sp_tensors = batch_dict['multi_scale_3d_features'+rot_num_id][src_name]

                if with_vf_transform: # False
                    cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
                else:
                    cur_sp_tensors = batch_dict['multi_scale_3d_features'+rot_num_id][src_name]

                # compute voxel center xyz and batch_cnt
                cur_coords = cur_sp_tensors.indices
                cur_voxel_xyz = common_utils.get_voxel_centers(
                    cur_coords[:, 1:4],
                    downsample_times=cur_stride,
                    voxel_size=self.voxel_size,
                    point_cloud_range=self.point_cloud_range
                )  #
                cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
                for bs_idx in range(batch_size):
                    cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
                # get voxel2point tensor

                v2p_ind_tensor = spconv_utils.generate_voxel2pinds(cur_sp_tensors)

                # compute the grid coordinates in this scale, in [batch_idx, x y z] order
                cur_roi_grid_coords = roi_grid_coords // cur_stride
                cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
                cur_roi_grid_coords = cur_roi_grid_coords.int()
                # voxel neighbor aggregation
                pooled_features = pool_layer(
                    xyz=cur_voxel_xyz.contiguous(),
                    xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
                    new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
                    new_xyz_batch_cnt=roi_grid_batch_cnt,
                    new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
                    features=cur_sp_tensors.features.contiguous(),
                    voxel2point_indices=v2p_ind_tensor
                )

                pooled_features = pooled_features.view(
                    -1, self.pool_cfg.GRID_SIZE ** 3,
                    pooled_features.shape[-1]
                )  # (BxN, 6x6x6, C=64)

                pooled_features_list.append(pooled_features)

        ms_pooled_features = torch.cat(pooled_features_list, dim=-1) # [B*160,216,128]

        return ms_pooled_features

    def roi_grid_pool(self, batch_dict, i, src_index):
        """Pool voxel features around ROI grid points for ONE feature source.

        Simplified single-source variant of ``roi_grid_pool_org``: only
        ``FEATURES_SOURCE[src_index]`` is pooled.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                multi_scale_3d_features{suffix}: per-rotation sparse tensors.
                multi_scale_3d_strides: per-source downsample stride.
            i: rotation index; 0 selects the un-suffixed feature keys.
            src_index: index into self.pool_cfg.FEATURES_SOURCE.

        Returns:
            (BxN, GRID_SIZE**3, C_src) pooled grid features.
        """
        # Rotation 0 is stored under the un-suffixed key.
        rot_num_id = '' if i == 0 else str(i)

        rois = batch_dict['rois'].clone()
        batch_size = batch_dict['batch_size']
        with_vf_transform = batch_dict.get('with_voxel_feature_transform', False)

        # Dense grid points inside every ROI, in world coordinates.
        roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
            rois, grid_size=self.pool_cfg.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)  # (B, Nx6x6x6, 3)

        # Voxel coordinates of the grid points (floor division by voxel size).
        roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
        roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
        roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
        roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)

        batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
        for bs_idx in range(batch_size):
            batch_idx[bs_idx, :, 0] = bs_idx
        roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])

        pooled_features_list = []
        src_name = self.pool_cfg.FEATURES_SOURCE[src_index]
        # Both refinement passes share the pooling layers: sources 3..5 reuse
        # the layers built for sources 0..2 (same scale, shared weights).
        pool_layer_index = src_index if src_index < 3 else src_index - 3
        pool_layer = self.roi_grid_pool_layers[pool_layer_index]
        if src_name in ['x_conv1', 'x_conv2', 'x_conv3', 'x_conv4']:

            cur_stride = batch_dict['multi_scale_3d_strides'][src_name]

            # Fall back to the nearest earlier rotation whose features exist.
            # BUGFIX: rotation 0 lives under the un-suffixed key, so use ''
            # for j <= 0 (the previous `str(j)` probed the nonexistent '...0'
            # key and could loop forever).
            j = i
            while 'multi_scale_3d_features' + rot_num_id not in batch_dict:
                j -= 1
                rot_num_id = '' if j <= 0 else str(j)

            if with_vf_transform:
                cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
            else:
                cur_sp_tensors = batch_dict['multi_scale_3d_features' + rot_num_id][src_name]

            # Voxel centers (xyz) and per-sample voxel counts for this scale.
            cur_coords = cur_sp_tensors.indices
            cur_voxel_xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_times=cur_stride,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range
            )
            cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()

            # voxel -> point index mapping used by the neighbor lookup.
            v2p_ind_tensor = spconv_utils.generate_voxel2pinds(cur_sp_tensors)

            # Grid coordinates at this scale, in [batch_idx, x, y, z] order.
            cur_roi_grid_coords = roi_grid_coords // cur_stride
            cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
            cur_roi_grid_coords = cur_roi_grid_coords.int()

            # Voxel neighbor aggregation around each grid point.
            pooled_features = pool_layer(
                xyz=cur_voxel_xyz.contiguous(),
                xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
                new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
                new_xyz_batch_cnt=roi_grid_batch_cnt,
                new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
                features=cur_sp_tensors.features.contiguous(),
                voxel2point_indices=v2p_ind_tensor
            )

            pooled_features = pooled_features.view(
                -1, self.pool_cfg.GRID_SIZE ** 3,
                pooled_features.shape[-1]
            )  # (BxN, 6x6x6, C)
            pooled_features_list.append(pooled_features)

        ms_pooled_features = torch.cat(pooled_features_list, dim=-1)

        return ms_pooled_features
    
    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return (global, local) dense grid points for each ROI.

        Local points form a regular lattice inside the axis-aligned box;
        global points are the local ones rotated by the ROI heading and
        shifted to the ROI center.
        """
        rois = rois.view(-1, rois.shape[-1])
        num_rois = rois.shape[0]

        local_pts = self.get_dense_grid_points(rois, num_rois, grid_size)  # (B, 6x6x6, 3)
        global_pts = common_utils.rotate_points_along_z(
            local_pts.clone(), rois[:, 6]
        ).squeeze(dim=1)
        global_pts = global_pts + rois[:, 0:3].clone().unsqueeze(dim=1)
        return global_pts, local_pts

    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        faked_features = rois.new_ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float()  # (B, 6x6x6, 3)

        local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
                          - (local_roi_size.unsqueeze(dim=1) / 2)  # (B, 6x6x6, 3)
        return roi_grid_points

    def roi_x_trans(self, rois, rot_num_i, transform_param):
        """Re-map ROIs from the previous rotation frame into the current one.

        For rot_num_i >= 1 the incoming ROIs are expressed in frame
        (rot_num_i - 1); undo that frame's transform and apply the transform
        of frame rot_num_i.

        Args:
            rois: (B, N, 7+C) boxes in the previous rotation frame.
            rot_num_i: index of the target rotation frame (1, 2, ...).
            transform_param: per-sample sequence of transform parameters.

        Returns:
            (B, N, 7+C) boxes expressed in frame rot_num_i.
        """
        rois = rois.clone()
        transformed = []

        for sample_idx, sample_rois in enumerate(rois):
            params = transform_param[sample_idx]
            prev_param = params[rot_num_i - 1]
            cur_param = params[rot_num_i]

            # Undo the previous frame's transform, then apply the current one,
            # so all ROIs of this sample land in frame rot_num_i.
            back = self.x_trans_train.backward_with_param(
                {'boxes': sample_rois, 'transform_param': prev_param})
            fwd = self.x_trans_train.forward_with_param(
                {'boxes': back['boxes'], 'transform_param': cur_param})
            transformed.append(fwd['boxes'])

        return torch.stack(transformed)

    def pred_x_trans(self, preds, rot_num_i, transform_param):
        """Map predicted boxes back to the original (untransformed) frame.

        Args:
            preds: (B, N, 7+C) boxes expressed in rotation frame rot_num_i.
            rot_num_i: index of the frame the predictions currently live in.
            transform_param: per-sample sequence of transform parameters.

        Returns:
            (B, N, 7+C) boxes in the source coordinate frame.
        """
        preds = preds.clone()
        restored = []

        for sample_idx, sample_preds in enumerate(preds):
            cur_param = transform_param[sample_idx][rot_num_i]
            # Invert the current frame's transform to return to the source frame.
            back = self.x_trans_train.backward_with_param(
                {'boxes': sample_preds, 'transform_param': cur_param})
            restored.append(back['boxes'])

        return torch.stack(restored)

    def multi_grid_pool_aggregation_backup(self, batch_dict, targets_dict):
        """Iteratively refine ROIs across rotation-augmented views (backup variant).

        For each rotation index i: re-express the ROIs in view i's frame, pool
        grid features from the matching 3D feature map, fuse them with features
        from earlier iterations via cross attention, predict box refinements,
        and feed the refined boxes into the next iteration. The final boxes and
        scores are the mean over all iterations.

        Returns:
            (mean_box_preds, mean_cls_preds) averaged over self.rot_num passes.
        """

        all_preds = []
        all_scores = []

        all_shared_features = []

        for i in range(self.rot_num):

            rot_num_id = str(i)

            if i >= 1 and 'transform_param' in batch_dict:
                # For i >= 1 the ROIs are expressed in the previous rotated
                # frame; re-map them into frame i (1 -> 0, 2 -> 1).
                batch_dict['rois'] = self.roi_x_trans(batch_dict['rois'], i, batch_dict['transform_param'])

            if self.training:
                # Match predicted ROIs against GT and return the sampled ROI/GT pairs.
                targets_dict = self.assign_targets(batch_dict, i, enable_dif=True)

                batch_dict['rois'] = targets_dict['rois']

                batch_dict['roi_labels'] = targets_dict['roi_labels']

            if 'transform_param' in batch_dict:
                pooled_features = self.roi_grid_pool(batch_dict, i)   # i selects the matching rotated 3D feature map [160,216,128]
            else:
                pooled_features = self.roi_grid_pool(batch_dict, 0)

            pooled_features = pooled_features.view(pooled_features.size(0), -1) # [B*160,216*128]

            shared_features = self.shared_fc_layers(pooled_features)   # [B*160,256]
            shared_features = shared_features.unsqueeze(0)  # 1,B*160,C
            all_shared_features.append(shared_features)
            pre_feat = torch.cat(all_shared_features, 0)   # 1/2/num_rot,B*160,256, # rot=1, pre_feat = shared_features

            # FIXME: no cross grid attention
            # i=0: ROIs from the same input interact; i=1: ROIs from both
            # inputs interact — progressive interaction across iterations.
            # B*160, num_rot, 256
            attentive_cur_feat = self.cross_attention_layers(pre_feat.permute(1, 0, 2)).unsqueeze(0) # ROIs of different rot ids interact [1,B*160,256]
            # attentive_cur_feat = pre_feat
            ##################################
            attentive_cur_feat = torch.cat([attentive_cur_feat, shared_features], -1)
            attentive_cur_feat = attentive_cur_feat.squeeze(0)  # B, C*2

            rcnn_cls = self.cls_layers(attentive_cur_feat)
            rcnn_reg = self.reg_layers(attentive_cur_feat)

            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )

            if self.training:

                targets_dict['rcnn_cls'] = rcnn_cls  # [160, 1]
                targets_dict['rcnn_reg'] = rcnn_reg  # [160, 7]

                self.forward_ret_dict['targets_dict' + rot_num_id] = targets_dict

            # Refined boxes become the ROIs of the next iteration.
            batch_dict['rois'] = batch_box_preds
            batch_dict['roi_scores'] = batch_cls_preds.squeeze(-1)

            outs = batch_box_preds.clone()
            if 'transform_param' in batch_dict:
                # Map the predictions back to the original coordinate frame.
                outs = self.pred_x_trans(outs, i, batch_dict['transform_param'])

            all_preds.append(outs)
            all_scores.append(batch_cls_preds)

        return torch.mean(torch.stack(all_preds), 0), torch.mean(torch.stack(all_scores), 0)

    def multi_grid_pool_aggregation_org(self, batch_dict, targets_dict):
        """Multi-scale variant of the iterative ROI refinement loop (original).

        Outer loop over rotation views i; inner loop over pooling scales k.
        Each per-scale slice of the pooled features is refined separately, and
        the final boxes/scores are the mean over every (i, k) iteration.

        Returns:
            (mean_box_preds, mean_cls_preds).
        """

        all_preds = []
        all_scores = []

        all_shared_features = []

        for i in range(self.rot_num):

            rot_num_id = str(i)

            if i >= 1 and 'transform_param' in batch_dict:
                # For i >= 1 the ROIs live in the previous rotated frame;
                # re-map them into frame i (1 -> 0, 2 -> 1).
                batch_dict['rois'] = self.roi_x_trans(batch_dict['rois'], i, batch_dict['transform_param'])


            # FIXME: multi-scale aggregation
            num_scale = len(self.pool_cfg.FEATURES_SOURCE)


            for k in range(num_scale):

                if self.training:
                # Match predicted ROIs against GT and return the sampled ROI/GT pairs.
                    targets_dict = self.assign_targets(batch_dict, i, enable_dif=True)

                    batch_dict['rois'] = targets_dict['rois']

                    batch_dict['roi_labels'] = targets_dict['roi_labels']

                if 'transform_param' in batch_dict:
                    pooled_features = self.roi_grid_pool(batch_dict, i)   # i selects the matching rotated 3D feature map [B*160,216,128]
                else:
                    pooled_features = self.roi_grid_pool(batch_dict, 0)

                # Slice out the k-th scale's channel segment of the pooled features.
                seg_index = int(pooled_features.shape[-1] / num_scale)

                sub_pooled_features = pooled_features[:,:,k*seg_index:(k+1)*seg_index]

                sub_pooled_features = sub_pooled_features.contiguous().view(sub_pooled_features.size(0), -1)
                # FIXME: only two shared FC heads exist; scales k >= 1 all share the second.
                shared_features = self.shared_fc_layers(sub_pooled_features) if k==0 else self.shared_fc_layers1(sub_pooled_features)
                shared_features = shared_features.unsqueeze(0)
                all_shared_features.append(shared_features)
                pre_feat = torch.cat(all_shared_features, 0)

                # pooled_features = pooled_features.view(pooled_features.size(0), -1) # [B*160,216*128]
                # shared_features = self.shared_fc_layers(pooled_features)   # [B*160,256]
                # shared_features = shared_features.unsqueeze(0)  # 1,B*160,C
                # all_shared_features.append(shared_features)
                # pre_feat = torch.cat(all_shared_features, 0)   # 1/2/num_rot,B*160,256, # rot=1, pre_feat = shared_features

                # FIXME: no cross grid attention
                # i=0: ROIs from the same input interact; i=1: ROIs from both
                # inputs interact — progressive interaction across iterations.
                attentive_cur_feat = self.cross_attention_layers(pre_feat.permute(1, 0, 2)).unsqueeze(0) # ROIs of different rot ids interact [1,B*160,256]
                # attentive_cur_feat = pre_feat
                ##################################
                attentive_cur_feat = torch.cat([attentive_cur_feat, shared_features], -1)
                attentive_cur_feat = attentive_cur_feat.squeeze(0)  # B, C*2

                rcnn_cls = self.cls_layers(attentive_cur_feat)
                rcnn_reg = self.reg_layers(attentive_cur_feat)


                batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                    batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
                )

                if self.training:

                    targets_dict['rcnn_cls'] = rcnn_cls  # [160, 1]
                    targets_dict['rcnn_reg'] = rcnn_reg  # [160, 7]

                    # FIXME: targets are stored per scale index k, not per rotation id.
                    # self.forward_ret_dict['targets_dict' + rot_num_id] = targets_dict
                    self.forward_ret_dict['targets_dict' + str(k)] = targets_dict

                # Refined boxes become the ROIs of the next iteration.
                batch_dict['rois'] = batch_box_preds
                batch_dict['roi_scores'] = batch_cls_preds.squeeze(-1)

                outs = batch_box_preds.clone()
                if 'transform_param' in batch_dict:
                    # Map the predictions back to the original coordinate frame.
                    outs = self.pred_x_trans(outs, i, batch_dict['transform_param'])

                all_preds.append(outs)
                all_scores.append(batch_cls_preds)

        return torch.mean(torch.stack(all_preds), 0), torch.mean(torch.stack(all_scores), 0)

    def assign_targets(self, batch_dict, rot_num_id, enable_dif = False):
        """Sample ROI/GT pairs and encode GT boxes in each ROI's canonical frame.

        Args:
            batch_dict: network data dict; must contain 'batch_size' and ROIs.
            rot_num_id: rotation-view index; selects the proposal target layer
                and (with enable_dif) the matching rotated GT boxes.
            enable_dif: when True, look up GT boxes under the view suffix.

        Returns:
            targets_dict with 'rois', 'gt_of_rois' (ROI-local, heading-aligned)
            and 'gt_of_rois_src' (an untouched copy in the original frame).
        """
        batch_size = batch_dict['batch_size']
        with torch.no_grad():
            if rot_num_id == 0:
                s_str = ''
            else:
                s_str = str(rot_num_id)
            if enable_dif: # True: look up the rotated GT boxes matching suffix s_str
                targets_dict = self.proposal_target_layers[rot_num_id].forward(batch_dict, s_str)
            else:
                targets_dict = self.proposal_target_layers[rot_num_id].forward(batch_dict, '')

        rois = targets_dict['rois']  # (B, N, 7 + C)
        gt_of_rois = targets_dict['gt_of_rois']  # (B, N, 7 + C + 1)

        # Keep an unmodified copy for losses computed in the original frame.
        targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()

        # canonical transformation: express each GT box relative to its ROI
        roi_center = rois[:, :, 0:3]
        roi_ry = rois[:, :, 6] % (2 * np.pi)
        gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
        gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry

        # transfer LiDAR coords to local coords
        gt_of_rois = common_utils.rotate_points_along_z(
            points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
        ).view(batch_size, -1, gt_of_rois.shape[-1])

        # flip orientation if rois have opposite orientation
        heading_label = gt_of_rois[:, :, 6] % (2 * np.pi)  # 0 ~ 2pi
        opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
        heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        flag = heading_label > np.pi
        heading_label[flag] = heading_label[flag] - np.pi * 2  # (-pi/2, pi/2)
        heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)

        gt_of_rois[:, :, 6] = heading_label
        targets_dict['gt_of_rois'] = gt_of_rois
        return targets_dict
    
    def multi_grid_pool_aggregation(self, batch_dict, targets_dict):
        '''
        Multi-scale ROI refinement with CBAM-style spatial attention
        (multi_grid_pool_aggregation_CBAM).

        Loops over pooling scales k. At each scale the pooled grid features
        are gated by a spatial-attention map (thr_datt*) computed from the
        previous scale's raw features, concatenated with the gated features,
        and compressed by a 3D conv block (thr_block*) before the shared
        cls/reg heads. Final boxes/scores are averaged over all scales.
        '''

        all_preds = []
        all_scores = []

        all_shared_features = []


        # FIXME: multi-scale aggregation
        num_scale = len(self.pool_cfg.FEATURES_SOURCE)

        for k in range(num_scale):

            if self.training:
            # Match predicted ROIs against GT and return the sampled ROI/GT pairs.
                targets_dict = self.assign_targets(batch_dict, 0, enable_dif=True)

                batch_dict['rois'] = targets_dict['rois']

                batch_dict['roi_labels'] = targets_dict['roi_labels']

            pooled_features = self.roi_grid_pool(batch_dict, 0, k) # [B*160,7*7*7,64*num_scale]
            tmp_pooled_features = pooled_features.clone()
            # pooled_features = self.roi_grid_pool(batch_dict, 0) # [B*160,7*7*7,64*num_scale]

            # pre_pooled_features = self.roi_grid_pool(batch_dict, 0, max(k-1,0))

            num_box, grid_size, fea = pooled_features.shape     # [B*160, 7*7*7,64]
            # NOTE(review): grid resolution 7 and channel count 64 are
            # hard-coded in the reshape below — assumes GRID_SIZE == 7 and
            # 64 pooled channels; confirm against the POOL config.
            pooled_features   = pooled_features.contiguous().view(num_box,7,7,7,64).permute(0,4,1,2,3) # [320,192,7,7,7]
            if k != 0:
                pre_pooled_features = pre_pooled_features.contiguous().view(num_box,7,7,7,64).permute(0,4,1,2,3)

            if k == 0:
                # pre_pooled_features = pooled_features.clone().detach()
                att = self.thr_datt1(pooled_features)
                pooled_features = torch.cat((pooled_features,pooled_features*att),dim=1)
                shared_features = self.thr_block1(pooled_features)

            elif k == 1:
                att = self.thr_datt2(pre_pooled_features)
                pooled_features = torch.cat((pooled_features,pre_pooled_features*att),dim=1)
                shared_features = self.thr_block2(pooled_features)

            elif k == 2:

                att = self.thr_datt3(pre_pooled_features)
                pooled_features = torch.cat((pooled_features,pre_pooled_features*att),dim=1)
                shared_features = self.thr_block3(pooled_features)

            # FIXME: keep this scale's raw pooled features for the next iteration.
            pre_pooled_features = tmp_pooled_features

            shared_features = shared_features.contiguous().view(num_box,256)
            # shared_features = shared_features.unsqueeze(0)
            # all_shared_features.append(shared_features)
            # pre_feat = torch.cat(all_shared_features, 0)



            # FIXME: no cross grid attention
            # i=0: ROIs from the same input interact; i=1: ROIs from both
            # inputs interact — progressive interaction (disabled here).
            # attentive_cur_feat = self.cross_attention_layers(pre_feat.permute(1, 0, 2)).unsqueeze(0) # ROIs of different rot ids interact [1,B*160,256]
            # attentive_cur_feat = torch.cat([attentive_cur_feat, shared_features], -1)
            # attentive_cur_feat = attentive_cur_feat.squeeze(0)  # B, C*2

            rcnn_cls = self.cls_layers(shared_features)
            rcnn_reg = self.reg_layers(shared_features)

            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )

            if self.training:

                targets_dict['rcnn_cls'] = rcnn_cls  # [160, 1]
                targets_dict['rcnn_reg'] = rcnn_reg  # [160, 7]
                # FIXME: targets are stored per scale index k.
                # self.forward_ret_dict['targets_dict' + rot_num_id] = targets_dict
                self.forward_ret_dict['targets_dict' + str(k)] = targets_dict

            # Refined boxes become the ROIs of the next scale iteration.
            batch_dict['rois'] = batch_box_preds
            batch_dict['roi_scores'] = batch_cls_preds.squeeze(-1)

            outs = batch_box_preds.clone()
            if 'transform_param' in batch_dict:
                # Map the predictions back to the original coordinate frame.
                outs = self.pred_x_trans(outs, 0, batch_dict['transform_param'])

            all_preds.append(outs)
            all_scores.append(batch_cls_preds)
        # return torch.mean(torch.stack(all_preds), 0), torch.mean(torch.stack(all_scores), 0)
        # FIXME:extra loss for combine prediction
        # mean_batch_box_preds = torch.mean(torch.stack(all_preds), 0)
        # mean_batch_cls_preds = torch.mean(torch.stack(all_scores), 0)

        return torch.mean(torch.stack(all_preds), 0), torch.mean(torch.stack(all_scores), 0)

    def multi_grid_pool_aggregation_2loop(self, batch_dict, targets_dict):
        """Multi-scale refinement over num_scale iterations; only outputs of
        iterations k > 2 are averaged into the final boxes/scores.

        NOTE(review): the FC-head reuse pattern (k and k+3 share a head) and
        the k > 2 collection imply num_scale == 6 is expected; with
        num_scale <= 3 the collected lists would be empty — confirm against
        the POOL config.
        """
        # multi_grid_pool_aggregation_2loop

        last_three_preds = []
        last_three_scores = []
        first_three_features = []


        # FIXME: multi-scale aggregation
        num_scale = len(self.pool_cfg.FEATURES_SOURCE)

        # init_rois       = batch_dict['rois']
        # init_roi_scores = batch_dict['roi_scores']
        # init_roi_labels = batch_dict['roi_labels']

        for k in range(num_scale):

            # if k == 3:
            #     batch_dict['rois']       = torch.cat((batch_dict['rois'],init_rois),dim=1)
            #     batch_dict['roi_scores'] = torch.cat((batch_dict['roi_scores'],init_roi_scores),dim=1)
            #     batch_dict['roi_labels'] = torch.cat((batch_dict['roi_labels'],init_roi_labels),dim=1)

            if self.training:
            # Match predicted ROIs against GT and return the sampled ROI/GT pairs.

                targets_dict = self.assign_targets(batch_dict, 0, enable_dif=True)

                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']

            pooled_features = self.roi_grid_pool(batch_dict, 0, k) # [B*160,216,128]

            num_box, grid_size, fea = pooled_features.shape
            pooled_features = pooled_features.view(num_box, -1) # [320,13824]

            # Scale pairs (k, k+3) share the same FC head.
            if k == 0 or k == 3:
                shared_features = self.shared_fc_layers(pooled_features)
            elif k == 1 or k == 4:
                shared_features = self.shared_fc_layers1(pooled_features)
            elif k == 2 or k == 5:
                shared_features = self.shared_fc_layers2(pooled_features)    # [320,256]

            # N,B,C
            # shared_features = shared_features.view(batch_dict['batch_size'],-1,256).permute(1,0,2)

            # if k <= 2:
            # first_three_features.append(shared_features)
            # attentive_cur_feat = shared_features.permute(1,0,2).view(-1, 256)
            attentive_cur_feat = shared_features

            # if k > 2:
            #     # N,B,C
            #     # attentive_cur_feat = self.cross_attention_layers(first_three_features[k-3],shared_features)
            #     # attentive_cur_feat = attentive_cur_feat.permute(1,0,2).contiguous().view(-1,256) # B, N, C
            #     # FIXME:
            #     shared_features = shared_features.unsqueeze(0)                 # [1,320,256]
            #     attentive_cur_feat = torch.cat([shared_features,first_three_features[k-3].unsqueeze(0)],dim=0)
            #     attentive_cur_feat = attentive_cur_feat.permute(1, 0, 2)
            #     attentive_cur_feat = self.cross_attention_layers(attentive_cur_feat).unsqueeze(0) + shared_features
            #     attentive_cur_feat = attentive_cur_feat.squeeze(0)


            rcnn_cls = self.cls_layers(attentive_cur_feat)
            rcnn_reg = self.reg_layers(attentive_cur_feat)

            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )


            if self.training:

                targets_dict['rcnn_cls'] = rcnn_cls  # [160, 1]
                targets_dict['rcnn_reg'] = rcnn_reg  # [160, 7]

                self.forward_ret_dict['targets_dict' + str(k)] = targets_dict

            # Refined boxes become the ROIs of the next iteration.
            batch_dict['rois'] = batch_box_preds
            batch_dict['roi_scores'] = batch_cls_preds.squeeze(-1)

            outs = batch_box_preds.clone()
            if 'transform_param' in batch_dict:
                # Map the predictions back to the original coordinate frame.
                outs = self.pred_x_trans(outs, 0, batch_dict['transform_param'])

            # Only the second half of the iterations contributes to the output.
            if k > 2:
                last_three_preds.append(outs)
                last_three_scores.append(batch_cls_preds)

        return torch.mean(torch.stack(last_three_preds), 0), torch.mean(torch.stack(last_three_scores), 0)
    
    def get_box_reg_layer_loss(self, forward_ret_dict):
        """Compute the RCNN box regression loss.

        Components:
          * smooth-L1 loss on encoded regression targets (foreground ROIs only);
          * optional corner-loss regularization on decoded boxes;
          * an additional box-overlap loss (bb_loss) on decoded predictions.

        Args:
            forward_ret_dict: targets dict containing 'reg_valid_mask',
                'gt_of_rois', 'gt_of_rois_src', 'rcnn_reg' and 'rois'.

        Returns:
            (total_reg_loss, tb_dict) — scalar loss and logging dict.
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        code_size = self.box_coder.code_size
        reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
        gt_boxes3d_ct = forward_ret_dict['gt_of_rois'].clone()[..., 0:code_size]
        gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
        rcnn_reg = forward_ret_dict['rcnn_reg']  # (rcnn_batch_size, C)
        roi_boxes3d = forward_ret_dict['rois']
        rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]

        fg_mask = (reg_valid_mask > 0)
        fg_sum = fg_mask.long().sum().item()

        tb_dict = {}

        if loss_cfgs.REG_LOSS == 'smooth-l1':
            # Anchors in the ROI-canonical frame: center and heading zeroed.
            rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
            rois_anchor[:, 0:3] = 0
            rois_anchor[:, 6] = 0
            reg_targets = self.box_coder.encode_torch(
                gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
            )

            rcnn_loss_reg = self.reg_loss_func(
                rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
                reg_targets.unsqueeze(dim=0),
            )  # [B, M, 7]
            # Average over foreground ROIs only (guard against fg_sum == 0).
            rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
            rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
            tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item()

            if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: NEED to BE CHECK
                fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
                fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]

                fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
                batch_anchors = fg_roi_boxes3d.clone().detach()
                roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
                roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
                batch_anchors[:, :, 0:3] = 0
                # Decode in the canonical frame, then rotate/translate back to LiDAR.
                rcnn_boxes3d = self.box_coder.decode_torch(
                    fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
                ).view(-1, code_size)

                rcnn_boxes3d = common_utils.rotate_points_along_z(
                    rcnn_boxes3d.unsqueeze(dim=1), roi_ry
                ).squeeze(dim=1)
                rcnn_boxes3d[:, 0:3] += roi_xyz

                loss_corner = loss_utils.get_corner_loss_lidar(
                    rcnn_boxes3d[:, 0:7],
                    gt_of_rois_src[fg_mask][:, 0:7]
                )
                loss_corner = loss_corner.mean()
                loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']

                rcnn_loss_reg += loss_corner
                tb_dict['rcnn_loss_corner'] = loss_corner.item()
        else:
            raise NotImplementedError

        # Additional box loss (bb_loss) on predictions decoded in the canonical frame.
        reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
        code_size = self.box_coder.code_size
        shape = forward_ret_dict['gt_of_rois'].shape
        gt_boxes3d_ct = forward_ret_dict['gt_of_rois'].clone().view(shape[0] * shape[1], -1)[:, 0:7]
        rcnn_reg = forward_ret_dict['rcnn_reg']  # (rcnn_batch_size, C)
        rois = forward_ret_dict['rois'].clone().view(-1, code_size)[:, 0:7]
        rois[:, 0:3] = 0
        rois[:, 6] = 0

        batch_box_preds = self.box_coder.decode_torch(rcnn_reg, rois).view(-1, code_size)

        fg_mask = (reg_valid_mask > 0)

        # No foreground ROIs: skip the extra term rather than dividing by zero.
        if len(gt_boxes3d_ct[fg_mask]) == 0:
            b_loss=0
        else:
            b_loss = bb_loss(batch_box_preds[fg_mask], gt_boxes3d_ct[
                fg_mask]).sum()
            b_loss = b_loss / (fg_mask.sum() + 1)

        return rcnn_loss_reg+b_loss, tb_dict

    def get_box_cls_layer_loss(self, forward_ret_dict):
        """Compute the RCNN classification loss.

        Args:
            forward_ret_dict: dict with 'rcnn_cls' logits and 'rcnn_cls_labels'
                (entries < 0 mark ignored proposals).

        Returns:
            (rcnn_loss_cls, tb_dict) — weighted scalar loss and logging dict.
        """
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        cls_preds = forward_ret_dict['rcnn_cls']
        cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)

        loss_type = loss_cfgs.CLS_LOSS
        if loss_type == 'BinaryCrossEntropy':
            per_sample_loss = F.binary_cross_entropy(
                torch.sigmoid(cls_preds.view(-1)), cls_labels.float(), reduction='none'
            )
        elif loss_type == 'CrossEntropy':
            per_sample_loss = F.cross_entropy(
                cls_preds, cls_labels, reduction='none', ignore_index=-1
            )
        else:
            raise NotImplementedError

        # Average only over valid (non-ignored) proposals.
        valid_mask = (cls_labels >= 0).float()
        rcnn_loss_cls = (per_sample_loss * valid_mask).sum() / torch.clamp(valid_mask.sum(), min=1.0)

        rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
        return rcnn_loss_cls, {'rcnn_loss_cls': rcnn_loss_cls.item()}

    def get_loss(self, tb_dict=None):
        """Accumulate RCNN cls/reg losses over every stored targets dict.

        Keys 'targets_dict{i}' contribute at full weight; the auxiliary keys
        'targets_dict_pi{i}' and 'targets_dict_p{i}' contribute at half weight.

        Args:
            tb_dict: optional logging dict to extend in place.

        Returns:
            (rcnn_loss, tb_dict).
        """
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        weighted_prefixes = (
            ('targets_dict', 1.0),
            ('targets_dict_pi', 0.5),
            ('targets_dict_p', 0.5),
        )
        for i in range(6):
            for prefix, weight in weighted_prefixes:
                key = prefix + str(i)
                if key not in self.forward_ret_dict:
                    continue
                target = self.forward_ret_dict[key]
                cls_loss, _ = self.get_box_cls_layer_loss(target)
                rcnn_loss += weight * cls_loss
                reg_loss, _ = self.get_box_reg_layer_loss(target)
                rcnn_loss += weight * reg_loss

        tb_dict['rcnn_loss'] = rcnn_loss.item()

        return rcnn_loss, tb_dict
    
    def forward(self, batch_dict):
        """ROI head entry point: generate proposals via NMS, then refine them.

        Args:
            batch_dict: network data dict; may contain 'transform_param' of
                shape (B, rot_num, ...) describing rotation augmentations.

        Returns:
            batch_dict, with 'batch_box_preds'/'batch_cls_preds' added at
            inference time.
        """

        if 'transform_param' in batch_dict:
            trans_param = batch_dict['transform_param']
            self.rot_num = trans_param.shape[1]  # refresh self.rot_num from the input
        else:
            self.rot_num = 1

        # self.proposal_layer keeps the top proposals after NMS and writes
        #   rois: (B, num_rois, 7+C), roi_scores: (B, num_rois),
        #   roi_labels: (B, num_rois) into batch_dict.
        # This runs in the i=0 coordinate frame.
        '''
        self.proposal_layer: 经NMS获取
          batch_dict:
                rois: (B, num_rois, 7+C)
                roi_scores: (B, num_rois)
                roi_labels: (B, num_rois)
        在i=0坐标系下进行
        '''
        # import pdb; pdb.set_trace()
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )

        boxes, scores = self.multi_grid_pool_aggregation(batch_dict, targets_dict)

        if not self.training:
            batch_dict['batch_box_preds'] = boxes
            batch_dict['batch_cls_preds'] = scores

        return batch_dict
    

class channel_attention(nn.Module):
    """CBAM-style channel attention over 5D (B, C, D, H, W) feature maps."""

    def __init__(self, channel, ratio=16):
        super(channel_attention, self).__init__()
        # Global max / average pooling squeeze each channel to one value.
        self.max_pool = nn.AdaptiveMaxPool3d(1)
        self.ave_pool = nn.AdaptiveAvgPool3d(1)
        # Shared bottleneck MLP (no biases) applied to both descriptors.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // ratio, False),
            nn.ReLU(),
            nn.Linear(channel // ratio, channel, False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return (attention_weights, reweighted_features)."""
        batch, channels = x.size(0), x.size(1)

        squeezed_max = self.max_pool(x).view([batch, channels])
        squeezed_avg = self.ave_pool(x).view([batch, channels])

        # Fuse both pooled descriptors through the shared MLP before gating.
        gate = self.sigmoid(self.fc(squeezed_max) + self.fc(squeezed_avg))
        gate = gate.view([batch, channels, 1, 1, 1])
        return gate, gate * x
 
class spacial_attention(nn.Module):
    """CBAM-style spatial attention: a single-channel gate over (D, H, W)."""

    def __init__(self, kernel=3):
        super(spacial_attention, self).__init__()
        # 2 input channels (channel-wise max + mean) -> 1 attention map.
        self.conv = nn.Conv3d(2, 1, kernel, 1, padding=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Channel-wise max and mean summarize where informative features sit.
        max_map = torch.max(x, dim=1, keepdim=True)[0]    # (B, 1, D, H, W)
        mean_map = torch.mean(x, dim=1, keepdim=True)

        stacked = torch.cat([max_map, mean_map], dim=1)
        # Per-voxel attention weights in (0, 1); the caller multiplies
        # them with the features.
        return self.sigmoid(self.conv(stacked))
 
class Cbam(nn.Module):
    """Attention wrapper; only the spatial branch is active in this revision."""

    def __init__(self, channel, kernel=3, ratio=16):
        super(Cbam, self).__init__()
        # NOTE(review): the channel-attention branch is disabled here;
        # `channel` and `ratio` are kept only for interface compatibility.
        self.spacial_attention = spacial_attention(kernel)

    def forward(self, x):
        # Spatial gate only: (B, 1, D, H, W) weights for the input features.
        return self.spacial_attention(x)
    
class _NonLocalBlockND(nn.Module):
    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
        super(_NonLocalBlockND, self).__init__()

        assert dimension in [1, 2, 3]

        self.dimension = dimension
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=(2))
            bn = nn.BatchNorm1d

        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)

        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels)
            )
            nn.init.constant(self.W[1].weight, 0)
            nn.init.constant(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            nn.init.constant(self.W.weight, 0)
            nn.init.constant(self.W.bias, 0)

        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)

        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

        if sub_sample:
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

    def forward(self, x):
        '''
        :param x: (b, c, t, h, w)
        :return:
        '''

        batch_size = x.size(0)

        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        f = torch.matmul(theta_x, phi_x)
        N = f.size(-1)
        f_div_C = f / N

        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x

        return z

class NONLocalBlock3D(_NonLocalBlockND):
    """3-D specialization of the generic N-D non-local block (dimension fixed to 3)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=False, bn_layer=True):
        # Delegate everything to the N-D base; only the dimensionality is pinned.
        super().__init__(
            in_channels,
            inter_channels=inter_channels,
            dimension=3,
            sub_sample=sub_sample,
            bn_layer=bn_layer,
        )

class Bottleneck(nn.Module):
    """3D bottleneck with a dual spatial/temporal middle stage.

    Channel reduction (1x1x1) -> parallel spatial (1x3x3) and temporal (3x3x3)
    convolutions whose outputs are concatenated back to ``planes`` channels ->
    expansion (1x1x1), wrapped in a residual connection.

    :param inplanes: input channel count.
    :param planes: internal channel count (also the concat width of the two branches).
    :param alpha: channel-split ratio between the spatial and temporal branches.
    :param beta: 2 to feed each branch half the channels, otherwise both see all.
    :param stride: spatial stride applied by both middle branches.
    :param expansion: output channel multiplier for the final 1x1x1 conv.
    :param downsample: optional module matching the residual to the output shape.
    """

    def __init__(self, inplanes, planes, alpha, beta, stride=1, expansion=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.expansion = expansion

        branch_in = planes // beta
        spatial_out = planes // alpha * (alpha - 1)
        temporal_out = planes // alpha

        # NOTE: submodules are created in the same order as the reference
        # implementation so seeded weight initialization matches exactly.
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # Spatial branch: kernel covers only the two spatial axes.
        self.conv2 = nn.Conv3d(branch_in, spatial_out, kernel_size=(1, 3, 3),
                               stride=(1, stride, stride), padding=(0, 1, 1), bias=False)
        # Temporal branch: full 3x3x3 kernel also mixes the time axis.
        self.Tconv = nn.Conv3d(branch_in, temporal_out, kernel_size=3,
                               stride=(1, stride, stride), padding=(1, 1, 1), bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.alpha = alpha
        self.beta = beta

    def forward(self, x):
        identity = x

        # 1x1x1 reduction.
        h = self.relu(self.bn1(self.conv1(x)))

        # Route channels into the two branches: split in half when beta == 2,
        # otherwise both branches consume the full feature map.
        if self.beta == 2:
            half = h.size(1) // self.beta
            spatial_in, temporal_in = h[:, :half], h[:, half:]
        else:
            spatial_in = temporal_in = h

        h = torch.cat((self.conv2(spatial_in), self.Tconv(temporal_in)), dim=1)
        h = self.relu(self.bn2(h))

        # 1x1x1 expansion.
        h = self.bn3(self.conv3(h))

        if self.downsample is not None:
            identity = self.downsample(identity)

        return self.relu(h + identity)