# Copyright (c) 2022-2023, NVIDIA Corporation & Affiliates. All rights reserved. 
# 
# This work is made available under the Nvidia Source Code License-NC. 
# To view a copy of this license, visit 
# https://github.com/NVlabs/FB-BEV/blob/main/LICENSE

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from .gsdoccdiss import GSDOCCDISS
from mmdet.models import DETECTORS
from mmdet3d.models.fbbev.custom_ops.bev_pool_v2 import bev_pool_v2
from mmdet3d.models.fbbev.custom_ops.grid_sampler import grid_sampler
def generate_forward_transformation_matrix(bda, img_meta_dict=None):
    """Lift a batch of 3x3 BEV data-augmentation matrices to 4x4 homogeneous transforms.

    Args:
        bda: tensor of shape (B, 3, 3) holding per-sample BEV augmentation
            rotation/scale matrices.
        img_meta_dict: unused; kept only for backward compatibility with
            existing callers.

    Returns:
        Tensor of shape (B, 4, 4) (float32, on ``bda``'s device) whose upper-left
        3x3 block is ``bda[i]``, with zero translation and a bottom row of
        ``[0, 0, 0, 1]``.
    """
    b = bda.size(0)
    # Allocate the batch of identity matrices directly on the target device.
    hom_res = torch.eye(4, device=bda.device)[None].repeat(b, 1, 1)
    # Vectorized write replaces the original per-sample Python loop.
    hom_res[:, :3, :3] = bda
    return hom_res



@DETECTORS.register_module()
class GSDOCCDISSTRT(GSDOCCDISS):
    """TensorRT-export wrapper around GSDOCCDISS.

    Flattens the detector's forward pass into ``forward_trt``, which consumes
    and produces plain tensors only, so the model can be traced/exported.
    The custom CUDA ops (``bev_pool_v2``, ``grid_sampler``) and
    ``torch.linalg.inv`` are bound as instance attributes so that export
    tooling can swap them for TensorRT plugin implementations.
    """

    def __init__(self, *args, **kwargs):
        super(GSDOCCDISSTRT, self).__init__(*args, **kwargs)
        # NOTE(review): hard-coded camera count (presumably the 6 nuScenes
        # surround-view cameras) -- confirm against the dataset config.
        self.num_cams = 6

        # Bind ops as attributes so they can be replaced at export time.
        self.bev_pool_v2 = bev_pool_v2
        self.inverse = torch.linalg.inv
        self.grid_sample = grid_sampler

    def prepare_mlp_inputs(self, cam_params):
        """Build the depth-net MLP conditioning input from camera parameters."""
        return self.depth_net.get_mlp_input(*cam_params)

    def generate_forward_augs(self, bda):
        """Lift per-sample 3x3 BDA matrices to 4x4 homogeneous transforms."""
        return generate_forward_transformation_matrix(bda)

    def prepare_bevpool_inputs(self, cam_params):
        """Precompute the rank/interval index tensors consumed by bev_pool_v2.

        These depend only on camera geometry, so they are computed host-side
        once and passed to ``forward_trt`` as plain tensor inputs.
        """
        coor = self.forward_projection.get_lidar_coor(*cam_params)
        ranks_bev, ranks_depth, ranks_feat, interval_starts, interval_lengths = \
            self.forward_projection.voxel_pooling_prepare_v2(coor)
        return ranks_bev, ranks_depth, ranks_feat, interval_starts, interval_lengths

    def forward_trt(self, inputs):
        """Flattened forward pass used for TensorRT export / deployment.

        Args:
            inputs: either a dict or a 12-tuple carrying, in order:
                imgs, mlp_input, ranks_depth, ranks_feat, ranks_bev,
                interval_starts, interval_lengths, start_of_sequence, grid,
                history_bev, history_seq_ids, history_sweep_time.

        Returns:
            Tuple of (pred_occupancy, history_bev, history_seq_ids,
            history_sweep_time) -- the updated history state is returned so
            the caller can feed it back in on the next frame.

        Raises:
            ValueError: if ``inputs`` is None or a tuple of the wrong length.
            TypeError: if ``inputs`` is neither a dict nor a tuple.

        NOTE(review): the ``print`` calls below are debug traces left in the
        export path; they are runtime behavior and are kept untouched here.
        """
        # Validate the input before unpacking.
        if inputs is None:
            raise ValueError("Input is None")

        # Debug: report input container type and contents.
        print(f"Input type: {type(inputs)}")
        if isinstance(inputs, dict):
            print("Input is a dictionary")
            print(f"Keys: {list(inputs.keys())}")
            imgs = inputs['imgs']
            mlp_input = inputs['mlp_input']
            ranks_depth = inputs['ranks_depth']
            ranks_feat = inputs['ranks_feat']
            ranks_bev = inputs['ranks_bev']
            interval_starts = inputs['interval_starts']
            interval_lengths = inputs['interval_lengths']
            start_of_sequence = inputs['start_of_sequence']
            grid = inputs['grid']
            history_bev = inputs['history_bev']
            history_seq_ids = inputs['history_seq_ids']
            history_sweep_time = inputs['history_sweep_time']
        elif isinstance(inputs, tuple):
            print("Input is a tuple")
            print(f"Tuple length: {len(inputs)}")
            if len(inputs) != 12:
                raise ValueError(f"Expected 12 inputs, got {len(inputs)}")
            imgs, mlp_input, ranks_depth, ranks_feat, ranks_bev, interval_starts, interval_lengths, \
                start_of_sequence, grid, history_bev, history_seq_ids, history_sweep_time = inputs
        else:
            raise TypeError(f"Unexpected input type: {type(inputs)}")

        # Debug: report the shapes of all unpacked tensors.
        print("\nInput tensor shapes:")
        print(f"imgs: {imgs.shape}")
        print(f"mlp_input: {mlp_input.shape}")
        print(f"ranks_depth: {ranks_depth.shape}")
        print(f"ranks_feat: {ranks_feat.shape}")
        print(f"ranks_bev: {ranks_bev.shape}")
        print(f"interval_starts: {interval_starts.shape}")
        print(f"interval_lengths: {interval_lengths.shape}")
        print(f"start_of_sequence: {start_of_sequence.shape}")
        print(f"grid: {grid.shape}")
        print(f"history_bev: {history_bev.shape}")
        print(f"history_seq_ids: {history_seq_ids.shape}")
        print(f"history_sweep_time: {history_sweep_time.shape}")

        # image encoder
        x = self.image_encoder(imgs)
        feat, depth = self.depth_net(x, mlp_input)

        # forward projection: lift image features into a voxel grid with the
        # precomputed pooling indices (see prepare_bevpool_inputs).
        voxel_feat_all = self.bev_pool_v2(depth,
                                    feat.permute(0, 1, 3, 4, 2),
                                    ranks_depth,
                                    ranks_feat,
                                    ranks_bev,
                                    interval_starts,
                                    interval_lengths).permute(0, 4, 2, 3, 1)
        student_voxel = voxel_feat_all[0:x.shape[0]]  # voxel features (student branch)
        # Collapse the height axis to obtain a BEV feature map.
        bev_feat = student_voxel.mean(-1)
        results = {}

        # Fuse History: combine the current BEV map with the cached history.
        forward_bev_feat, output_history_bev, output_history_seq_ids, output_history_sweep_time = self.fuse_history_trt(
            bev_feat,
            start_of_sequence,
            history_bev,
            history_sweep_time,
            history_seq_ids,
            grid)
        results['forward_bev_feat'] = forward_bev_feat
        img_bev_feat = self.bev_encoder(forward_bev_feat)

        results['student_voxel_feat'] = student_voxel
        results['img_bev_feat'] = img_bev_feat

        pred_occupancy = self.occupancy_head(img_bev_feat[0], results=results)

        return pred_occupancy, output_history_bev, output_history_seq_ids, output_history_sweep_time

    def fuse_history_trt(self,
                         curr_bev,           # [B, C, H, W] current-frame BEV features
                         start_of_sequence,  # [B] flag: nonzero at the start of a sequence
                         history_bev,        # [B, T*C, H, W] cached history BEV features (T past frames)
                         history_sweep_time, # [B, T] history timestamps (sweep units)
                         history_seq_ids,    # [B] sequence ids of the cached history
                         grid                # [B, H, W, 2] grid-sample coordinates
                         ):
        """Warp cached history BEV features into the current frame and fuse them.

        Returns a tuple of (fused BEV features, updated history_bev,
        history_seq_ids, updated history_sweep_time) so the caller can
        carry the history state forward to the next frame.
        """
        n, c_, h, w = curr_bev.shape

        history_bev = history_bev.to(curr_bev)

        # At a sequence start, seed the history with tiled copies of the
        # current frame; otherwise keep the cached history.
        # NOTE(review): start_of_sequence is [B] broadcast against
        # [B, T*C, H, W]; this only lines up as intended for the
        # single-sample shapes used at export time -- confirm.
        tmp_bev = start_of_sequence.float() * curr_bev.repeat(1, self.history_cat_num, 1, 1) + (1. - start_of_sequence.float()) * history_bev  # [B, T*C, H, W]
        n, mc, h, w = tmp_bev.shape
        # NOTE(review): shape no-op; possibly kept to pin static shapes in the
        # traced graph.
        tmp_bev = tmp_bev.reshape(n, mc, h, w)

        # Warp history features into the current frame via grid sampling.
        # NOTE(review): grid is scaled by 10 here -- presumably the exported
        # grid was pre-divided by 10 upstream; confirm against the producer.
        sampled_history_bev = self.grid_sample(tmp_bev, 10. * grid.to(curr_bev.dtype).permute(0, 3, 1, 2), align_corners=True, interpolation_mode=self.interpolation_mode, padding_mode='zeros')  # [B, T*C, H, W]

        ## Update history bookkeeping
        # Prepend the current frame's timestamp (zero) to the time sequence.
        history_sweep_time = torch.cat([history_sweep_time.new_zeros(history_sweep_time.shape[0], 1), history_sweep_time], dim=1)  # [B, T+1]

        # NOTE(review): both reshapes below are shape no-ops (see above).
        sampled_history_bev = sampled_history_bev.reshape(n, mc, h, w)  # [B, T*C, H, W]
        curr_bev = curr_bev.reshape(n, c_, h, w)  # [B, C, H, W]
        # Concatenate the current frame with the warped history.
        feats_cat = torch.cat([curr_bev, sampled_history_bev], dim=1)  # [B, (T+1)*C, H, W]

        # Unfold channels into an explicit time axis.
        feats_to_return = feats_cat.reshape(feats_cat.shape[0], self.history_cat_num + 1, self.single_bev_num_channels, *feats_cat.shape[2:])  # [B, T+1, C, H, W]

        # Append a per-frame time encoding as an extra channel.
        feats_to_return = torch.cat(
        [feats_to_return, history_sweep_time[:, :, None, None, None].repeat(
            1, 1, 1, *feats_to_return.shape[3:]) * self.history_cam_sweep_freq
        ], dim=2)  # [B, T+1, C+1, H, W]

        # Per-frame (time-wise) convolution.
        B, Tplus1, C, H, W = feats_to_return.shape
        feats_to_return = self.history_keyframe_time_conv(feats_to_return.reshape(B*Tplus1, C, H, W))  # [B*(T+1), C, H, W]
        feats_to_return = feats_to_return.reshape(B, Tplus1, self.single_bev_num_channels, H, W)  # [B, T+1, C, H, W]

        # Cross-frame (spatial) convolution over the flattened time axis.
        B, Tplus1, C, H, W = feats_to_return.shape
        feats_to_return = feats_to_return.reshape(B, Tplus1*C, H, W)  # [B, (T+1)*C, H, W]
        feats_to_return = self.history_keyframe_cat_conv(feats_to_return)  # [B, history_cat_conv_out_channels, H, W]

        # Roll the history window: keep the current frame plus the newest
        # T-1 history frames, dropping the oldest frame's channels.
        history_bev = feats_cat[:, :-self.single_bev_num_channels, ...].detach().clone()  # [B, T*C, H, W]
        history_sweep_time = history_sweep_time[:, :-1]  # [B, T]

        return feats_to_return.clone(), history_bev, history_seq_ids, history_sweep_time

    def post_process(self, pred_occupancy):
        """Convert raw occupancy logits to the CVPR2023-challenge label grid.

        Takes the head's [B, cls, ...] logits, drops class channel 0,
        re-orients the volume to the challenge's axis convention, and returns
        a single-element result list with the argmax category grid as numpy.
        """
        pred_occupancy = pred_occupancy.permute(0, 2, 3, 4, 1)[0]
        # Drop channel 0 -- NOTE(review): presumably an ignore/placeholder
        # class; confirm against the occupancy head's class layout.
        pred_occupancy = pred_occupancy[..., 1:]
        pred_occupancy = pred_occupancy.softmax(-1)

        # convert to CVPR2023 Format
        pred_occupancy = pred_occupancy.permute(3, 2, 0, 1)
        pred_occupancy = torch.flip(pred_occupancy, [2])
        pred_occupancy = torch.rot90(pred_occupancy, -1, [2, 3])
        pred_occupancy = pred_occupancy.permute(2, 3, 1, 0)

        # Hard class assignment per voxel.
        pred_occupancy_category = pred_occupancy.argmax(-1)

        pred_occupancy_category= pred_occupancy_category.cpu().numpy()

        result_dict = {}
        result_dict['pts_bbox'] = None
        result_dict['iou'] = None
        result_dict['pred_occupancy'] = pred_occupancy_category
        result_dict['index'] = None
        return [result_dict]
    
