import torch            
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from pcdet.utils.torch_utils import *

from .dsgn2_fast import DSGN2BackboneFast

class DSGN2BackboneFastTRT(DSGN2BackboneFast):
    """TensorRT-export variant of DSGN2BackboneFast.

    Unlike the parent, ``forward`` takes pre-computed tensors (stereo images,
    downsampled disparity candidates, the normalized voxel-to-image sampling
    grid, and a per-voxel validity mask) as explicit arguments instead of a
    batch dict, so the module can be traced/exported with a fixed batch
    size of 1.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)

    def forward(self, left, right, downsampled_disp, norm_coord_imgs, valids):
        """Build the 3D voxel feature volume from a stereo image pair.

        Args:
            left, right: stereo image tensors (batch size fixed to 1).
            downsampled_disp: disparity candidates consumed by ``build_cost``.
            norm_coord_imgs: normalized grid mapping voxel centers into
                cost-volume coordinates, fed to ``F.grid_sample``.
            valids: validity mask; features of voxels projecting outside the
                image frustum are zeroed.

        Returns:
            dict with keys ``'sem_features'``, ``'volume_features'`` and
            ``'batch_size'``.
        """
        N = 1  # export path assumes batch size 1
        batch_dict = {}

        # 2D feature extraction (4x, 8x, 16x pyramids for both views).
        left_features, right_features = self.feature_backbone([left, right])

        # Run the neck once on the stacked pair, then split left/right back out.
        batched_features_list = [
            torch.cat([left_features[i], right_features[i]], dim=0)
            for i in range(len(left_features))
        ]
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features_list)
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]

        if self.sem_neck is not None:
            batch_dict['sem_features'] = self.sem_neck([left_sem_feat])
        else:
            batch_dict['sem_features'] = [left_sem_feat]

        if not self.drop_psv:
            # Plane-sweep cost volume (no depth-plane sampling in the Fast
            # version). Built only when the PSV branch is enabled — the
            # original computed it unconditionally and discarded it.
            cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                       None, None, downsampled_disp)

            D_cv, H_cv, W_cv = cost_raw.shape[2:]
            # 3D convs are emulated with 2D convs over a (N, C*D, H, W) view
            # for TRT compatibility, then reshaped back to 5D.
            cost0 = self.dres0_relu(self.dres0_bn3d(
                self.dres0_conv2d(cost_raw.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)))
            out = self.dres1_bn3d(
                self.dres1_conv2d(cost0.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)) + cost0

            # Sample PSV features at every voxel center; zero invalid voxels.
            Voxel_psv = F.grid_sample(out, norm_coord_imgs, align_corners=True)
            Voxel_psv = Voxel_psv * valids[:, None, :, :, :]

        # LSS-style (lift-splat) volume from monocular semantic features.
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-3])
        num_disp = self.maxdisp // self.downsample_disp
        # First num_disp channels are depth logits; the rest are context features.
        depth_lss_volume = left_lss_feat[:, :num_disp]
        depth_lss_volume_s = depth_lss_volume.softmax(1)
        # Outer product: per-pixel depth distribution x per-pixel context features.
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) * left_lss_feat[:, num_disp:].unsqueeze(2)
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs, align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :]  # mask broadcasts over channels

        if self.drop_psv:
            # FIX: the original referenced the undefined Voxel_psv here when
            # drop_psv was set (NameError); use the LSS volume alone instead.
            Voxels = Voxel_lss_3d
        else:
            Voxels = torch.cat((Voxel_psv, Voxel_lss_3d), dim=1)

        Voxels = self.rpn3d_convs(Voxels)
        if self.num_3dconvs_hg > 0:
            if self.num_3dconvs_hg == 1:
                # Hourglass modules thread pre/post skip features through.
                pre, post = True, True
                for hg_stereo_module in self.rpn3d_hgs:
                    Voxels, pre, post = hg_stereo_module(Voxels, pre, post)
            else:
                pre, post = None, None
                for hg_stereo_module in self.rpn3d_hgs:
                    Voxels = hg_stereo_module(Voxels, pre, post)

        # Pool along the height dimension before handing off to the BEV head.
        Voxels = self.rpn3d_pool(Voxels)

        batch_dict['volume_features'] = Voxels
        batch_dict['batch_size'] = 1

        return batch_dict