# DSGN++ backbone (fv+tv)
import math
import torch            
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from pcdet.utils.torch_utils import *
from mmcv.runner import _load_checkpoint 
from torch.cuda.amp.autocast_mode import autocast
from .dsgn2_backbone_transformer_fpn import DSGN2BackboneTransFPN
import time


class DSGN2BackboneFast(DSGN2BackboneTransFPN):
    """Speed-optimized DSGN++ stereo 3D backbone (front-view + top-view).

    All sub-modules (2D feature backbone/neck, cost-volume convolutions,
    LSS mono-depth net, 3D hourglass stack, pooling head, ...) are built by
    the parent ``DSGN2BackboneTransFPN``; this subclass only overrides the
    forward pass with a faster variant that skips DPS feature warping when
    building the plane-sweep cost volume.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        # All module construction happens in the parent class.
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)
        # NOTE(review): dead local — never read, never stored on self.
        need_keys = []

    def forward(self, batch_dict):
        """Build 3D feature volumes from a stereo pair and store them in ``batch_dict``.

        Reads from ``batch_dict``: 'left_img', 'right_img', 'calib' (objects
        exposing ``fu_mul_baseline``, ``P2``, ``P3``), 'batch_size',
        'image_shape', and optionally 'random_T' (per-sample augmentation
        transform) and 'save_container'.

        Writes to ``batch_dict``: 'sem_features', 'norm_coord_imgs',
        'valids', 'volume_features', 'time_backbone_3d'; during training it
        may also append to 'depth_preds' / 'depth_volumes'.

        Returns the mutated ``batch_dict``.
        """
        start_time = time.time()
        # When 'save_container' is requested, intermediate tensors are kept
        # for export to a C++ runtime (see bottom of this method).
        save_container = True if 'save_container' in batch_dict else False
        if save_container: container = {}
        left = batch_dict['left_img']
        right = batch_dict['right_img']
        calib = batch_dict['calib']
        # Per-sample focal_u * baseline, and the left/right projection matrices.
        fu_mul_baseline = torch.as_tensor(
             [x.fu_mul_baseline for x in calib], dtype=torch.float32, device=left.device)
        calibs_Proj = torch.as_tensor(
            [x.P2 for x in calib], dtype=torch.float32, device=left.device)
        calibs_Proj_R = torch.as_tensor(
            [x.P3 for x in calib], dtype=torch.float32, device=left.device)
        N = batch_dict['batch_size']

        # 2D feature extraction: multi-scale features (4x, 8x, 16x strides).
        left_features, right_features = self.feature_backbone([left, right]) # 4x, 8x, 16x

        # Batch left+right along dim 0 so the neck runs once for both views.
        batched_features_list = [torch.cat([left_features[i], right_features[i]], dim=0) for i in range(len(left_features))]
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features_list)
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]


        # Semantic features (left view only) feed the 2D detection head.
        if self.sem_neck is not None:
            batch_dict['sem_features'] = self.sem_neck([left_sem_feat])
        else:
            batch_dict['sem_features'] = [left_sem_feat]



        # Stereo matching: convert candidate depths to disparities at the
        # cost-volume resolution (disparity = fu*baseline / depth).
        # TODO: SID
        downsampled_depth = self.downsampled_depth.cuda()
        downsampled_disp = fu_mul_baseline[:, None] / \
            downsampled_depth[None, :] / (self.downsample_disp if not self.fullres_stereo_feature else 1)

        # No DPS for Fast version: cost volume from raw stereo features only.
        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                None, None, downsampled_disp)

        # Cost aggregation implemented as 2D convs over flattened (D*C, H, W),
        # reshaped back to 5D; dres1 adds a residual connection.
        D_cv, H_cv, W_cv = cost_raw.shape[2:]
        cost0 = self.dres0_relu(self.dres0_bn3d(self.dres0_conv2d(cost_raw.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)))
        out = self.dres1_bn3d(self.dres1_conv2d(cost0.view(N, -1, H_cv, W_cv)).view(N, -1, D_cv, H_cv, W_cv)) + cost0


        # Convert the plane-sweep volume into a 3D voxel volume by sampling
        # it at the image projections of a fixed 3D coordinate grid.
        coordinates_3d = self.coordinates_3d.cuda()

        norm_coord_imgs = []
        valids2d = []
        for i in range(N):
            # map to rect camera coordinates
            c3d = coordinates_3d.view(-1, 3)
            if 'random_T' in batch_dict:
                # Apply the per-sample augmentation transform (rotation + translation).
                random_T = batch_dict['random_T'][i]
                c3d = torch.matmul(c3d, random_T[:3, :3].T) + random_T[:3, 3]

            # in pseudo lidar coord -> rectified camera coord
            c3d = project_pseudo_lidar_to_rectcam(c3d)
            #------------ left images ----------------------
            # coord_img: pixel (u, v) + depth; norm_coord_img: same, normalized
            # to [-1, 1] for grid_sample.
            coord_img, norm_coord_img = self.compute_mapping(c3d,
                left.shape[2:],
                torch.as_tensor(calib[i].P2, device='cuda', dtype=torch.float32),
                [self.CV_DEPTH_MIN, self.CV_DEPTH_MAX])
            coord_img = coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_img = norm_coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_imgs.append(norm_coord_img)

            # valid: voxel projects within the (un-padded) image bounds
            img_shape = batch_dict['image_shape'][i]
            valid_mask_2d = (coord_img[..., 0] >= 0) & (coord_img[..., 0] <= img_shape[1]) & \
                (coord_img[..., 1] >= 0) & (coord_img[..., 1] <= img_shape[0])
            valids2d.append(valid_mask_2d)

        norm_coord_imgs = torch.stack(norm_coord_imgs, dim=0)
        # NOTE(review): `norm_coord_imgs_R` is never populated in this Fast
        # forward (the loop above only handles the left view), so this branch
        # raises NameError if cat_right_img_feature/cat_right_lss is enabled —
        # confirm those flags are always off for this model variant.
        if self.cat_right_img_feature or self.cat_right_lss:
            norm_coord_imgs_R = torch.stack(norm_coord_imgs_R, dim=0)
        valids2d = torch.stack(valids2d, dim=0)
        batch_dict['norm_coord_imgs'] = norm_coord_imgs

        # Full validity also requires the normalized depth to be inside the
        # cost-volume depth range ([-1, 1] after normalization).
        valids = valids2d & (norm_coord_imgs[..., 2] >= -1.) & (norm_coord_imgs[..., 2] <= 1.)
        batch_dict['valids'] = valids
        valids = valids.float()

        if not self.drop_psv:
            # Retrieve voxel features from the plane-sweep cost volume;
            # invalid voxels are zeroed out.
            Voxel_psv = F.grid_sample(out, norm_coord_imgs.to(out.dtype), align_corners=True) # out: ([1, 32, 72, 120, 232]) norm_coord_img:([1, 20, 304, 288, 3])  # Voxel:([1, 32, 20, 304, 288])
            Voxel_psv = Voxel_psv * valids[:, None, :, :, :]
            # Voxels = [Voxel]
        else:
            # NOTE(review): this empty list is unused — `Voxels` is rebound by
            # the torch.cat below, which also requires `Voxel_psv` to exist;
            # the drop_psv=True path appears broken here. Verify config.
            Voxels = []

        # NOTE(review): `c3d` here is whatever the LAST loop iteration left —
        # OK only because depth along this axis is calibration-independent;
        # voxel_disps uses calib[0], presumably assuming shared intrinsics.
        voxel_depths = c3d.view(coordinates_3d.shape)[0,0,:,2]
        voxel_disps = calib[0].fu_mul_baseline / voxel_depths

        # Retrieve voxel features from the 2D semantic image features.
        if self.cat_img_feature:
            # NOTE(review): `Voxel_2D_left` is computed but never concatenated
            # (`Voxel_2D` below stays None), so the image-feature volume is
            # effectively dropped — confirm whether this is intentional.
            Voxel_2D_left = self.build_3d_geometry_volume(left_sem_feat, norm_coord_imgs, voxel_disps) #[1, 96, 480, 928] [1, 20, 304, 288, 3] [288]
            Voxel_2D_left *= valids2d.float()[:, None, :, :, :]
            # Voxels.append(Voxel_2D)

        # Build the LSS (lift-splat-shoot style) volume from a mono depth
        # distribution predicted on the left semantic features.
        # TODO: build LSS volume
        Voxel_lss_3d = None
        Voxel_2D = None
        pred_depth_lss = None
        if not self.drop_lss:
            # First maxdisp//downsample_disp channels = depth logits,
            # remaining channels = image features to be lifted.
            left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-3])
            depth_lss_volume = left_lss_feat[:, :self.maxdisp//self.downsample_disp] # ([1, 72, 120, 232])
            if self.training:
                # Upsample depth logits to full resolution for the depth loss.
                depth_lss_volume_full = F.interpolate(depth_lss_volume[:,None,], [self.maxdisp, *batch_dict['left_img'].shape[2:]],
                                            mode='trilinear',
                                            align_corners=True).squeeze(1)
                depth_lss_softmax = depth_lss_volume_full.softmax(dim=1, dtype=left_lss_feat.dtype)
                # Soft-argmax over depth bins -> expected depth map.
                pred_depth_lss = self.dispregression(depth_lss_softmax.squeeze(1), self.depth.cuda())

            # Outer product of depth distribution and image features
            # ("lift" step), then sample into the voxel grid.
            depth_lss_volume_s = depth_lss_volume.softmax(1)
            left_lss_volume = depth_lss_volume_s.unsqueeze(1) *  left_lss_feat[:,self.maxdisp//self.downsample_disp:,:].unsqueeze(2) # ([1, 288, 60, 116]), ([1, 32, 60, 116])
            left_lss_volume = self._forward_voxel_net(left_lss_volume)
            Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs.to(left_lss_volume.dtype), align_corners=True)
            Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :] ### Auto broadcasting



        # NOTE(review): assumes both PSV and LSS volumes exist — fails if
        # drop_psv (NameError) or drop_lss (Voxel_lss_3d is None) is set.
        Voxels =torch.cat((Voxel_psv,Voxel_lss_3d),dim=1)
        # Voxels = Voxel_psv
        if Voxel_2D is not None:
            Voxels = torch.cat((Voxels,Voxel_2D),dim=1)

        # 3D aggregation: conv stack then optional hourglass refinement.
        Voxels = self.rpn3d_convs(Voxels)
        if self.num_3dconvs_hg > 0:
            if self.num_3dconvs_hg == 1:
                # Single hourglass: thread pre/post skip tensors through.
                pre, post = True, True
                for hg_stereo_module in self.rpn3d_hgs:
                    Voxels, pre, post = hg_stereo_module(Voxels, pre, post) # ([1, 32, 20, 304, 288])
            else:
                pre, post = None, None
                for hg_stereo_module in self.rpn3d_hgs:
                    Voxels = hg_stereo_module(Voxels, pre, post)
        # batch_dict['volume_features_nopool'] = Voxels # ([1, 32, 20, 304, 288]) ([1, 32, 32, 204, 392])

        # Geometry supervision can attach before or after height pooling.
        if self.sup_geometry == 'volume':
            Voxel_for_geo = Voxels

        # Collapse the height dimension for the BEV detection head.
        Voxels = self.rpn3d_pool(Voxels) # ([1, 32, 5, 304, 288]) ([1, 32, 8, 204, 392])

        if self.sup_geometry == 'pooledvolume':
            Voxel_for_geo = Voxels
        batch_dict['volume_features'] = Voxels

        # Optional auxiliary heads on the (pooled or unpooled) geometry volume.
        if self.voxel_occupancy:
            batch_dict = self.forward_voxel_occupancy(batch_dict, Voxel_for_geo)

        if self.front_surface_depth:
            batch_dict = self.forward_front_surface_depth_head(batch_dict, Voxel_for_geo, calibs_Proj)
        if pred_depth_lss is not None and self.training:
            # Register the LSS depth prediction for the depth loss.
            batch_dict['depth_preds'].append(pred_depth_lss)
            batch_dict['depth_volumes'].append(depth_lss_volume_full)

        batch_dict['time_backbone_3d'] = time.time()-start_time
        if save_container:
            # Export the sampling tensors needed to replay this forward in
            # an external (C++) runtime.
            voxel_disps_channels = self.compute_disp_channels(voxel_disps,img_channels=left_sem_feat.shape[1], inv_ratio=self.inv_smooth_geo).to(torch.int32)
            batch_dict['downsampled_disp'] = downsampled_disp
            batch_dict['voxel_disps_channels'] = voxel_disps_channels

            data_to_cpp = {'norm_coord_imgs':norm_coord_imgs,
                'downsampled_disp':downsampled_disp,
                'valids2d':valids2d,
                'valids':valids,
                'voxel_disps_channels':voxel_disps_channels,
                'model':'DSGN2BackboneFast'}
            self.data_to_cpp = data_to_cpp
        return batch_dict

    def get_loss(self, batch_dict,tb_dict=None):
        """Return this backbone's auxiliary loss.

        Only the feature backbone's optional dense-relative-localization
        (drloc) loss applies here; returns 0 when that head is disabled.
        Returns (loss, tb_dict).
        """
        if getattr(self.feature_backbone,'use_drloc', False):
            loss_drloc, tb_dict = self.feature_backbone.get_loss(batch_dict, tb_dict)
        else:
            loss_drloc = 0
        return loss_drloc, tb_dict
