import torch            
import torch.utils.data
import torch.nn.functional as F
from pcdet.utils.torch_utils import *

from .dsgn2_backbone_cnn import DSGN2BackboneCNN

class DSGN2BackboneCNNTRT(DSGN2BackboneCNN):
    """TensorRT-export variant of DSGN2BackboneCNN.

    Flattens ``forward`` to plain-tensor inputs/outputs (no project batch_dict
    on input) so the module can be traced and exported, and provides helpers
    to (de)serialize the detection-head outputs into a flat tensor list.
    """

    # Keys of a head-output dict, in serialization order. Serialize and
    # deserialize MUST agree on this ordering for round-trips to work.
    _RESULT_KEYS = ('dir_cls_preds', 'cls_preds', 'box_preds')

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)

    def result_serialize(self, outs):
        """Flatten head outputs into a list of tensors.

        BUGFIX: the keys were placeholders ``'a'``/``'b'``/``'c'``, which are
        inconsistent with ``result_deserialize`` and would KeyError on real
        head outputs; use the shared key set in the shared order.

        Args:
            outs: iterable where each element's first item is a dict holding
                the tensors named in ``_RESULT_KEYS``.

        Returns:
            list: tensors in deterministic (out, key) order.
        """
        outs_ = []
        for out in outs:
            for key in self._RESULT_KEYS:
                outs_.append(out[0][key])
        return outs_

    def result_deserialize(self, outs):  # TODO: extend to multiple head outputs
        """Inverse of ``result_serialize`` for a single head output.

        Args:
            outs: flat sequence of tensors; only the first
                ``len(_RESULT_KEYS)`` entries are consumed.

        Returns:
            dict: key -> tensor, keyed by ``_RESULT_KEYS``.
        """
        outs_ = {}
        for kid, key in enumerate(self._RESULT_KEYS):
            outs_[key] = outs[kid]
        return outs_

    def forward(self, batched_images, downsampled_disp, norm_coord_imgs, valids):
        """Run the stereo backbone on pre-batched tensors.

        Args:
            batched_images: left and right images concatenated along the
                batch axis (left first) — see the N-based split below.
            downsampled_disp: disparity map passed to ``build_cost``.
            norm_coord_imgs: normalized 3D sampling grid for
                ``F.grid_sample`` — presumably shaped (1, Z, Y, X, 3);
                TODO confirm against the exporter.
            valids: per-voxel validity mask broadcast over channels.

        Returns:
            dict with 'sem_features', 'spatial_features' and 'batch_size'.
        """
        N = 1  # single-sample export; left = [:N], right = [N:]
        batch_dict = {}

        # Feature extraction: one backbone pass over the stacked pair, then
        # split the neck outputs back into left/right halves.
        batched_features = self.feature_backbone(batched_images)
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features)
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]

        if self.sem_neck is not None:
            batch_dict['sem_features'] = self.sem_neck([left_sem_feat])
        else:
            batch_dict['sem_features'] = [left_sem_feat]

        # No depth-supervised pre-processing for this fast/export version.
        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                   None, None, downsampled_disp)

        cost0 = self.dres0(cost_raw)
        out = self.dres1(cost0) + cost0
        if not self.drop_psv:
            # Sample plane-sweep-volume features at the 3D voxel centers.
            # e.g. out: (1, 32, 72, 120, 232), norm_coord_imgs: (1, 20, 304, 288, 3)
            #      -> Voxel_psv: (1, 32, 20, 304, 288)
            Voxel_psv = F.grid_sample(out, norm_coord_imgs, align_corners=True)
            Voxel_psv = Voxel_psv * valids[:, None, :, :, :]
        else:
            # BUGFIX: previously assigned `Voxels = []` here and left
            # Voxel_psv undefined, so the unconditional torch.cat below
            # raised NameError whenever drop_psv was set.
            Voxel_psv = None

        # Build the LSS (lift-splat) volume from the monocular depth head.
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-3])
        # First maxdisp//downsample_disp channels are depth logits,
        # the remainder are image features — e.g. (1, 72, 120, 232).
        num_depth_bins = self.maxdisp // self.downsample_disp
        depth_lss_volume = left_lss_feat[:, :num_depth_bins]

        depth_lss_volume_s = depth_lss_volume.softmax(1)
        # Outer product of depth distribution and features lifts 2D features
        # into a frustum volume, e.g. (1, 288, 60, 116) x (1, 32, 60, 116).
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) * left_lss_feat[:, num_depth_bins:, :].unsqueeze(2)
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs, align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :]

        if Voxel_psv is not None:
            Voxels = torch.cat((Voxel_psv, Voxel_lss_3d), dim=1)
        else:
            # NOTE(review): with drop_psv only the LSS volume feeds
            # rpn3d_convs — confirm its in-channels match this path.
            Voxels = Voxel_lss_3d

        Voxels = self.rpn3d_convs(Voxels)

        # Collapse (C, Z) into the channel axis to form BEV features.
        Voxels = Voxels.view(N, -1, self.grid_size[1], self.grid_size[0])
        batch_dict['spatial_features'] = Voxels

        batch_dict['batch_size'] = 1

        return batch_dict