import torch            
import torch.utils.data
import torch.nn.functional as F
from pcdet.utils.torch_utils import *

from .dsgn2_backbone_cnn2d import DSGN2BackboneCNN2D

class DSGN2BackboneCNN2DTRT(DSGN2BackboneCNN2D):
    """TensorRT-export variant of DSGN2BackboneCNN2D.

    Unlike the parent, forward() takes flat tensor arguments (instead of a
    batch dict) and uses hard-coded, static cost-volume dimensions so the
    graph can be traced with fixed shapes and exported to TensorRT.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)

    def result_serialize(self, outs):
        """Flatten nested prediction dicts into a flat list of tensors.

        NOTE(review): serializes keys 'a', 'b', 'c' while result_deserialize
        expects 'dir_cls_preds', 'cls_preds', 'box_preds' -- confirm the two
        orderings correspond before relying on round-tripping.
        """
        outs_ = []
        for out in outs:
            for key in ['a', 'b', 'c']:
                outs_.append(out[0][key])
        return outs_

    def result_deserialize(self, outs):  # TODO
        """Rebuild a prediction dict from a flat tensor list.

        `outs` must be ordered dir_cls / cls / box, matching `keys` below.
        """
        keys = ['dir_cls_preds', "cls_preds", "box_preds"]
        return {key: outs[kid] for kid, key in enumerate(keys)}

    def forward(self, batched_images, downsampled_disp, norm_coord_imgs, valids):
        """Build BEV spatial features from stacked stereo images.

        Args:
            batched_images: left and right images stacked along the batch
                dim (left first), i.e. batch size 2*N with N == 1.
            downsampled_disp: disparity input consumed by self.build_cost.
            norm_coord_imgs: normalized 3D sampling grid for F.grid_sample.
            valids: validity mask multiplied into the sampled volumes.

        Returns:
            dict with 'sem_features', 'spatial_features' (BEV map) and
            'batch_size' (always 1 for the exported graph).
        """
        N = 1  # TRT export is traced with a fixed batch size of one
        batch_dict = {}

        # Shared 2D feature extraction over the stacked left/right images.
        batched_features = self.feature_backbone(batched_images)
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features)
        left_stereo_feat = batched_stereo_feat[:N]
        left_sem_feat = batched_sem_feat[:N]
        right_stereo_feat = batched_stereo_feat[N:N + 1]
        # right semantic features are not used by this head

        batch_dict['sem_features'] = self.sem_neck([left_sem_feat])

        # No DPS for the Fast version.
        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                   None, None, downsampled_disp)

        # Static cost-volume dims (D_cv, H_cv, W_cv = cost_raw.shape[2:])
        # hard-coded so the exported graph has constant shapes.
        D_cv = 36
        H_cv = 96   # height before dres0_pool
        h_cv = 32   # height after dres0_pool
        W_cv = 224

        # 3D aggregation emulated with 2D convs by folding the depth dim
        # into channels (TRT-friendly), then unfolding back to 5D.
        cost0 = self.dres0_relu(self.dres0_bn3d(
            self.dres0_conv2d(self.dres0_pool(cost_raw.view(N, -1, H_cv, W_cv))).view(N, -1, D_cv, h_cv, W_cv)))
        out = self.dres1_bn3d(
            self.dres1_conv2d(cost0.view(N, -1, h_cv, W_cv)).view(N, -1, D_cv, h_cv, W_cv)) + cost0

        # Sample the plane-sweep volume at the 3D voxel coordinates.
        Voxel_psv = F.grid_sample(out, norm_coord_imgs, align_corners=True)
        Voxel_psv = Voxel_psv * valids  # mask out-of-frustum voxels

        # LSS-style monocular volume from the semantic features: first
        # num_depth_bins channels are depth logits, the rest are features.
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-1])
        num_depth_bins = self.maxdisp // self.downsample_disp
        depth_lss_volume = left_lss_feat[:, :num_depth_bins]
        depth_prob = depth_lss_volume.softmax(1)
        left_lss_volume = depth_prob.unsqueeze(1) * left_lss_feat[:, num_depth_bins:, :].unsqueeze(2)
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs, align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids  # auto broadcasting for TRT model

        # Fuse the two volumes along channels. The original
        # unsqueeze(2)/squeeze(2) pair around this cat was a no-op; plain
        # cat is identical and matches the Half variant.
        Voxels = torch.cat((Voxel_psv, Voxel_lss_3d), dim=1)
        Voxels = self.rpn3d_convs(Voxels)

        # Collapse (channels, depth) into channels to get a BEV map.
        Voxels = Voxels.view(N, -1, self.grid_size[1], self.grid_size[0])

        batch_dict['spatial_features'] = Voxels
        batch_dict['batch_size'] = 1
        return batch_dict
    
class DSGN2BackboneCNN2DHalfTRT(DSGN2BackboneCNN2DTRT):
    """FP16 (half-precision) variant of the TRT backbone.

    Same dataflow as DSGN2BackboneCNN2DTRT, but the module runs in fp16
    and explicitly casts around ops that are kept in fp32 (build_cost) or
    that require matching dtypes (grid_sample inputs, sem feature reuse).
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)

    def forward(self, batched_images, downsampled_disp, norm_coord_imgs, valids):
        """FP16 forward pass; see the parent class for argument semantics.

        Returns a dict with 'sem_features', 'spatial_features', 'batch_size'.
        """
        # NOTE(review): converts the whole module to fp16 on every call;
        # idempotent, but could presumably be hoisted to __init__ --
        # confirm whether export requires it here.
        self.half()
        N = 1  # traced with a fixed batch size of one
        batch_dict={}
        # feature extraction over the stacked left/right images
        batched_features = self.feature_backbone(batched_images)
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features)
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:N+1], batched_sem_feat[N:N+1]
        # batch_dict['left_stereo_feat'] = left_stereo_feat
        # batch_dict['right_stereo_feat'] = right_stereo_feat
        
        batch_dict['sem_features'] = self.sem_neck([left_sem_feat])

        # No DPS for Fast version.
        # build_cost is evaluated in fp32 and the result cast back to fp16.
        cost_raw = self.build_cost(left_stereo_feat.float(), right_stereo_feat.float(),
                                None, None, downsampled_disp.float()).half()

        # Static cost-volume dims (D_cv, H_cv, W_cv = cost_raw.shape[2:])
        # hard-coded so the exported graph has constant shapes.
        D_cv = 36
        H_cv = 96   # height before dres0_pool
        h_cv = 32   # height after dres0_pool
        W_cv = 224 # turn to constant
        # cost0 = self.dres0_relu(self.dres0_bn3d(self.dres0_conv2d(cost_raw.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)))
        # out = self.dres1_bn3d(self.dres1_conv2d(cost0.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)) + cost0
        # 3D aggregation emulated with 2D convs by folding depth into channels.
        cost0 = self.dres0_relu(self.dres0_bn3d(self.dres0_conv2d(self.dres0_pool(cost_raw.view(N, -1, H_cv, W_cv))).view(N, -1, D_cv, h_cv, W_cv)))
        out = self.dres1_bn3d(self.dres1_conv2d(cost0.view(N, -1, h_cv, W_cv)).view(N, -1, D_cv, h_cv, W_cv)) + cost0
            
        # Sample the plane-sweep volume; grid must match the fp16 dtype.
        Voxel_psv = F.grid_sample(out, norm_coord_imgs.to(out.dtype), align_corners=True) # out: ([1, 32, 72, 120, 232]) norm_coord_img:([1, 20, 304, 288, 3])  # Voxel:([1, 32, 20, 304, 288])
        # explicit channel axis insertion on the mask (vs. auto-broadcast
        # in the parent class) -- mask out-of-frustum voxels
        Voxel_psv = Voxel_psv * valids[:, None, :, :, :]

        # LSS-style monocular volume: first maxdisp//downsample_disp
        # channels are depth logits, the remainder are features.
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-1].to(Voxel_psv.dtype))
        depth_lss_volume = left_lss_feat[:, :self.maxdisp//self.downsample_disp] # ([1, 72, 120, 232])
        depth_lss_volume_s = depth_lss_volume.softmax(1)
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) * left_lss_feat[:,self.maxdisp//self.downsample_disp:,:].unsqueeze(2) # ([1, 288, 60, 116]), ([1, 32, 60, 116])
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs.to(left_lss_volume.dtype), align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :] ### Auto broadcasting

        # Fuse the two sampled volumes along channels.
        Voxels =torch.cat((Voxel_psv,Voxel_lss_3d),dim=1)

        Voxels = self.rpn3d_convs(Voxels)
        
        # Collapse (channels, depth) into channels to get a BEV map.
        Voxels = Voxels.view(N, -1, self.grid_size[1], self.grid_size[0])
        
        batch_dict['spatial_features'] = Voxels
        batch_dict['batch_size'] = 1
        
        return batch_dict