import math
import torch            
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from mmcv.cnn import build_conv_layer
from mmdet.models.builder import build_backbone, build_neck, build_backbone_new
from mmdet.models.backbones.resnet import BasicBlock
from . import submodule
from .submodule import convbn_3d, convbn, feature_extraction_neck_v2
from .cost_volume import BuildCostVolume
from pcdet.ops.build_geometry_volume import build_geometry_volume
from pcdet.ops.build_dps_geometry_volume import build_dps_geometry_volume
from pcdet.utils.torch_utils import *
import pdb
from mmcv.runner import _load_checkpoint 
from torch.cuda.amp.autocast_mode import autocast
import time
from .dsgn2_backbone_ablation import DSGN2BackboneTransAblation

class Container(torch.nn.Module):
    """Minimal nn.Module that exposes the entries of a mapping as attributes.

    Useful for wrapping a dict of tensors/sub-modules so that it can be
    scripted/saved like a regular module.
    """

    def __init__(self, my_values):
        super().__init__()
        # Mirror every key/value pair of the mapping onto this module.
        for key, value in my_values.items():
            setattr(self, key, value)
            
class DSGN2BackboneCNN2D(DSGN2BackboneTransAblation):
    """DSGN2 stereo backbone that aggregates the plane-sweep cost volume with
    grouped 2D convolutions instead of 3D convolutions.

    The (2*cv_dim, D, H, W) cost volume is flattened to (2*cv_dim*D, H, W) so
    that one convolution group per depth bin processes its own slice, avoiding
    the cost of an equivalent ``nn.Conv3d`` stack.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)
        # 2D image backbone + neck producing (stereo, semantic) feature pairs.
        self.feature_backbone = build_backbone(model_cfg.feature_backbone)
        feature_backbone_pretrained = getattr(model_cfg, 'feature_backbone_pretrained', None)
        if feature_backbone_pretrained:
            self.feature_backbone.init_weights(pretrained=feature_backbone_pretrained)
        self.feature_neck = build_neck(model_cfg.feature_neck)

        # Number of depth bins in the plane-sweep cost volume.
        self.depth_cv = self.maxdisp // self.downsample_disp
        CV_INPUT_DIM = self.cv_dim * 2  # concatenated left/right features
        # Vertical 3x average pooling applied before aggregation (H shrinks by 3).
        self.dres0_pool = nn.AvgPool2d(kernel_size=(3, 1), stride=(3, 1))
        # Grouped convolutions: one group per depth bin on the flattened volume.
        self.dres0_conv2d = nn.Conv2d(CV_INPUT_DIM * self.depth_cv, self.cv_dim * self.depth_cv,
                                      kernel_size=1, groups=self.depth_cv)
        self.dres0_bn3d = nn.BatchNorm3d(self.cv_dim)
        self.dres0_relu = nn.ReLU(inplace=True)
        self.dres1_conv2d = nn.Conv2d(self.cv_dim * self.depth_cv, self.cv_dim * self.depth_cv,
                                      kernel_size=3, stride=1, padding=1, groups=self.depth_cv)
        self.dres1_bn3d = nn.BatchNorm3d(self.cv_dim)

    def forward(self, batch_dict):
        """Build stereo + LSS 3D feature volumes and collapse them to a BEV map.

        Args:
            batch_dict: dict with at least 'left_img', 'right_img', 'calib',
                'batch_size' and 'image_shape'; 'random_T' (per-sample
                augmentation transform) is optional.

        Returns:
            The same dict, extended with 'sem_features', 'norm_coord_imgs',
            'valids', 'spatial_features_img', 'depth_preds', 'depth_volumes'
            and 'depth_samples'.
        """
        left = batch_dict['left_img']
        right = batch_dict['right_img']
        calib = batch_dict['calib']
        fu_mul_baseline = torch.as_tensor(
            [x.fu_mul_baseline for x in calib], dtype=left.dtype, device=left.device)
        calibs_Proj = torch.as_tensor(
            [x.P2 for x in calib], dtype=left.dtype, device=left.device)

        N = batch_dict['batch_size']
        # Run the shared backbone once on the stacked left|right images.
        batched_images = torch.cat([left, right], dim=0)
        batched_features = self.feature_backbone(batched_images)

        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features)
        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]

        batch_dict['sem_features'] = self.sem_neck([left_sem_feat])

        # Convert the sampled depths into disparities at feature-map resolution.
        downsampled_depth = self.downsampled_depth.cuda()
        downsampled_disp = fu_mul_baseline[:, None] / \
            downsampled_depth[None, :] / (self.downsample_disp if not self.fullres_stereo_feature else 1)
        self.data_to_cpp['downsampled_disp'] = downsampled_disp

        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                   None, None, downsampled_disp)

        # Stereo matching network: fold the depth axis into channels so the
        # grouped 2D convolutions aggregate each depth slice independently.
        D_cv, H_cv, W_cv = cost_raw.shape[2:]
        cost0 = self.dres0_relu(self.dres0_bn3d(
            self.dres0_conv2d(self.dres0_pool(cost_raw.view(N, -1, H_cv, W_cv)))
            .view(N, -1, D_cv, H_cv // 3, W_cv)))
        # Residual refinement in the same flattened layout.
        out = self.dres1_bn3d(
            self.dres1_conv2d(cost0.view(N, -1, H_cv // 3, W_cv))
            .view(N, -1, D_cv, H_cv // 3, W_cv)) + cost0

        # Convert the plane-sweep volume into a regular 3D voxel volume.
        self.data_to_cpp['coordinates_3d'] = self.coordinates_3d
        coordinates_3d = self.coordinates_3d.cuda()
        norm_coord_imgs = []
        valids2d = []
        for i in range(N):
            c3d = coordinates_3d.view(-1, 3)
            if 'random_T' in batch_dict:
                # Apply the per-sample augmentation transform to the voxel grid.
                random_T = batch_dict['random_T'][i]
                c3d = torch.matmul(c3d, random_T[:3, :3].T) + random_T[:3, 3]

            # Pseudo-lidar -> rectified camera coordinates.
            c3d = project_pseudo_lidar_to_rectcam(c3d)
            # Project voxel centers into the left image plane.
            coord_img, norm_coord_img = self.compute_mapping(c3d,
                left.shape[2:],
                torch.as_tensor(calib[i].P2, device='cuda', dtype=torch.float32),
                [self.CV_DEPTH_MIN, self.CV_DEPTH_MAX])
            coord_img = coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_img = norm_coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_imgs.append(norm_coord_img)

            # Keep only voxels whose projection lands inside the image.
            img_shape = batch_dict['image_shape'][i]
            valid_mask_2d = (coord_img[..., 0] >= 0) & (coord_img[..., 0] <= img_shape[1]) & \
                (coord_img[..., 1] >= 0) & (coord_img[..., 1] <= img_shape[0])
            valids2d.append(valid_mask_2d)

        norm_coord_imgs = torch.stack(norm_coord_imgs, dim=0)
        self.data_to_cpp['norm_coord_imgs'] = norm_coord_imgs

        valids2d = torch.stack(valids2d, dim=0)
        batch_dict['norm_coord_imgs'] = norm_coord_imgs

        # Additionally require the normalized depth to lie within [-1, 1].
        valids = valids2d & (norm_coord_imgs[..., 2] >= -1.) & (norm_coord_imgs[..., 2] <= 1.)
        batch_dict['valids'] = valids
        valids = valids.float()
        self.data_to_cpp['valids'] = valids

        if not self.drop_psv:
            # Sample the plane-sweep volume at the projected voxel locations
            # and zero out voxels that fall outside the frustum.
            Voxel_psv = F.grid_sample(out, norm_coord_imgs.to(out.dtype), align_corners=True)
            Voxel_psv = Voxel_psv * valids[:, None, :, :, :]
        else:
            # NOTE(review): with drop_psv=True, Voxel_psv is never assigned yet
            # is still used in the torch.cat below — this path looks broken;
            # confirm drop_psv is never enabled for this backbone.
            Voxels = []

        # Build the LSS (lift-splat-shoot style) volume from semantic features.
        Voxel_lss_3d = None
        Voxel_2D = None
        pred_depth_lss = None
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-1], calibs_Proj)
        # The first maxdisp//downsample_disp channels are depth logits;
        # the remaining channels are image features.
        depth_lss_volume = left_lss_feat[:, :self.maxdisp // self.downsample_disp]
        depth_lss_volume_full = F.interpolate(depth_lss_volume[:, None], [self.maxdisp, *batch_dict['left_img'].shape[2:]],
                                              mode='trilinear',
                                              align_corners=True).squeeze(1)
        depth_lss_softmax = depth_lss_volume_full.softmax(dim=1, dtype=left_lss_feat.dtype)
        pred_depth_lss = self.dispregression(depth_lss_softmax.squeeze(1), self.depth.cuda())  # supervision during training

        # LSS "lift": outer product of depth distribution and image features.
        depth_lss_volume_s = depth_lss_volume.softmax(1)
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) * left_lss_feat[:, self.maxdisp // self.downsample_disp:, :].unsqueeze(2)
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs.to(left_lss_volume.dtype), align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :]

        # Fuse stereo and LSS volumes, then aggregate with the 3D RPN convs.
        Voxels = torch.cat((Voxel_psv, Voxel_lss_3d), dim=1)
        Voxels = self.rpn3d_convs(Voxels)

        if self.sup_geometry == 'volume':
            Voxel_for_geo = Voxels

        # Collapse the height axis into channels to obtain a BEV feature map.
        Voxels = Voxels.view(N, -1, self.grid_size[1], self.grid_size[0])
        batch_dict['spatial_features_img'] = Voxels

        if self.voxel_occupancy:
            batch_dict = self.forward_voxel_occupancy(batch_dict, Voxel_for_geo)

        batch_dict['depth_preds'] = []
        batch_dict['depth_volumes'] = []
        batch_dict['depth_samples'] = self.depth.clone().detach().cuda()
        if self.front_surface_depth:
            batch_dict = self.forward_front_surface_depth_head(batch_dict, Voxel_for_geo, calibs_Proj)
        if pred_depth_lss is not None:
            batch_dict['depth_preds'].append(pred_depth_lss)
            batch_dict['depth_volumes'].append(depth_lss_volume_full)

        return batch_dict
    
class DSGN2BackboneCNN2DHalf(DSGN2BackboneCNN2D):
    """Half-precision (fp16) variant of DSGN2BackboneCNN2D.

    Casts the module and the stacked input images to fp16 inside forward();
    the rest of the pipeline mirrors the parent class, with a few dtype
    conversions sprinkled in where mixed-precision tensors meet.
    """

    def __init__(self, model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg, class_names, grid_size, voxel_size, point_cloud_range, **kwargs)

    def forward(self, batch_dict):
        """fp16 forward pass; see DSGN2BackboneCNN2D.forward for the pipeline.

        Returns batch_dict extended with 'sem_features', 'norm_coord_imgs',
        'valids', 'spatial_features', depth predictions/volumes/samples.
        """
        # NOTE(review): self.half() converts every parameter on *every* call.
        # It is idempotent, but consider casting once at construction time.
        self.half()
        left = batch_dict['left_img']
        right = batch_dict['right_img']
        calib = batch_dict['calib']
        fu_mul_baseline = torch.as_tensor(
             [x.fu_mul_baseline for x in calib], dtype=left.dtype, device=left.device)
        calibs_Proj = torch.as_tensor(
            [x.P2 for x in calib], dtype=left.dtype, device=left.device)
        
        N = batch_dict['batch_size']
        # Run the shared backbone once on the stacked left|right images (fp16).
        batched_images = torch.cat([left, right], dim=0)
        batched_images = batched_images.half()
        batched_features = self.feature_backbone(batched_images)
        batched_stereo_feat, batched_sem_feat = self.feature_neck(batched_features)

        left_stereo_feat, left_sem_feat = batched_stereo_feat[:N], batched_sem_feat[:N]
        right_stereo_feat, right_sem_feat = batched_stereo_feat[N:], batched_sem_feat[N:]

        batch_dict['sem_features'] = self.sem_neck([left_sem_feat])

        # stereo matching: build stereo volume
        # Convert the sampled depths to disparities at feature-map resolution.
        downsampled_depth = self.downsampled_depth.cuda()
        downsampled_disp = fu_mul_baseline[:, None] / \
            downsampled_depth[None, :] / (self.downsample_disp if not self.fullres_stereo_feature else 1)
        self.data_to_cpp['downsampled_disp'] = downsampled_disp

        cost_raw = self.build_cost(left_stereo_feat, right_stereo_feat,
                                None, None, downsampled_disp.half())
        # stereo matching network: depth axis is folded into channels so the
        # grouped 2D convolutions aggregate each depth slice independently.
        D_cv, H_cv, W_cv = cost_raw.shape[2:]
        cost0 = self.dres0_relu(self.dres0_bn3d(self.dres0_conv2d(self.dres0_pool(cost_raw.view(N, -1, H_cv, W_cv))).view(N, -1, D_cv, int(H_cv/3), W_cv)))
        out = self.dres1_bn3d(self.dres1_conv2d(cost0.view(N, -1, int(H_cv/3), W_cv)).view(N, -1, D_cv, int(H_cv/3), W_cv)) + cost0

        # out = out.half()
            
        # convert plane-sweep into 3d volume
        #############
        self.data_to_cpp['coordinates_3d'] = self.coordinates_3d
        #############
        coordinates_3d = self.coordinates_3d.cuda()
        norm_coord_imgs = []

        valids2d = []
        for i in range(N):
            # map to rect camera coordinates
            c3d = coordinates_3d.view(-1, 3)
            if 'random_T' in batch_dict:
                # Apply the per-sample augmentation transform to the voxel grid.
                random_T = batch_dict['random_T'][i]
                c3d = torch.matmul(c3d, random_T[:3, :3].T) + random_T[:3, 3]

            # in pseudo lidar coord
            c3d = project_pseudo_lidar_to_rectcam(c3d)
            #------------ left images ----------------------
            # Project voxel centers into the left image plane (done in fp32).
            coord_img, norm_coord_img = self.compute_mapping(c3d,
                left.shape[2:],
                torch.as_tensor(calib[i].P2, device='cuda', dtype=torch.float32),
                [self.CV_DEPTH_MIN, self.CV_DEPTH_MAX])
            coord_img = coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_img = norm_coord_img.view(*self.coordinates_3d.shape[:3], 3)
            norm_coord_imgs.append(norm_coord_img)

            # valid: within images
            img_shape = batch_dict['image_shape'][i]
            valid_mask_2d = (coord_img[..., 0] >= 0) & (coord_img[..., 0] <= img_shape[1]) & \
                (coord_img[..., 1] >= 0) & (coord_img[..., 1] <= img_shape[0])
            valids2d.append(valid_mask_2d)

        norm_coord_imgs = torch.stack(norm_coord_imgs, dim=0)
        #############
        self.data_to_cpp['norm_coord_imgs'] = norm_coord_imgs

        valids2d = torch.stack(valids2d, dim=0)
        batch_dict['norm_coord_imgs'] = norm_coord_imgs

        # Additionally require the normalized depth to lie within [-1, 1].
        valids = valids2d & (norm_coord_imgs[..., 2] >= -1.) & (norm_coord_imgs[..., 2] <= 1.)
        batch_dict['valids'] = valids
        #############
        self.data_to_cpp['valids'] = valids
        #############
        
 
        # Retrieve Voxel Feature from Cost Volume Feature
        Voxel_psv = F.grid_sample(out, norm_coord_imgs.to(out.dtype), align_corners=True) # out: ([1, 32, 36, 96, 224]) norm_coord_img:([1, 10, 152, 144, 3])  # Voxel:([1, 32, 10, 152, 144])
        # Zero out voxels whose projection falls outside the image frustum.
        Voxel_psv = Voxel_psv * valids[:, None, :, :, :].to(Voxel_psv.dtype)


        # TODO: build LSS volume
        Voxel_lss_3d = None
        Voxel_2D = None
        pred_depth_lss = None
        # NOTE(review): the parent class also passes calibs_Proj to
        # mono_depthnet here; confirm the single-argument call is intentional.
        left_lss_feat = self.mono_depthnet(batch_dict['sem_features'][-1].to(Voxel_psv.dtype))
        # First maxdisp//downsample_disp channels are depth logits; rest are features.
        depth_lss_volume = left_lss_feat[:, :self.maxdisp//self.downsample_disp] # ([1, 72, 120, 232])
        depth_lss_volume_full = F.interpolate(depth_lss_volume[:,None,], [self.maxdisp, *batch_dict['left_img'].shape[2:]],
                                        mode='trilinear',
                                    align_corners=True).squeeze(1)
        depth_lss_softmax = depth_lss_volume_full.softmax(dim=1, dtype=left_lss_feat.dtype)
        pred_depth_lss = self.dispregression(depth_lss_softmax.squeeze(1), self.depth.cuda()) # training only

        # LSS "lift": outer product of depth distribution and image features.
        depth_lss_volume_s = depth_lss_volume.softmax(1)
        left_lss_volume = depth_lss_volume_s.unsqueeze(1) *  left_lss_feat[:,self.maxdisp//self.downsample_disp:,:].unsqueeze(2) # ([1, 288, 60, 116]), ([1, 32, 60, 116])
        left_lss_volume = self._forward_voxel_net(left_lss_volume)
        Voxel_lss_3d = F.grid_sample(left_lss_volume, norm_coord_imgs.to(left_lss_volume.dtype), align_corners=True)
        Voxel_lss_3d = Voxel_lss_3d * valids[:, None, :, :, :].to(Voxel_lss_3d.dtype)

        # Fuse stereo and LSS volumes, then aggregate with the 3D RPN convs.
        Voxels =torch.cat((Voxel_psv,Voxel_lss_3d),dim=1)

        Voxels = self.rpn3d_convs(Voxels) 
        

        if self.sup_geometry == 'volume':
            Voxel_for_geo = Voxels
            
        # Voxels = self.rpn3d_pool(Voxels)
        # Collapse the height axis into channels to obtain a BEV feature map.
        Voxels = Voxels.view(N, -1, self.grid_size[1], self.grid_size[0])
        # batch_dict['volume_features'] = Voxels
        # NOTE(review): the parent stores this under 'spatial_features_img';
        # confirm the key difference ('spatial_features') is intentional.
        batch_dict['spatial_features'] = Voxels

        # from pcdet.utils.feat_vis_utils import visual_feature
        # visual_feature(spatial_features_2d, layer=f"{data_dict['frame_id'][0]}_bev_feat", path = "/cv/yc/DSGN2/img_bbox3d/cnn2d_")

        if self.voxel_occupancy:
            batch_dict = self.forward_voxel_occupancy(batch_dict, Voxel_for_geo)

        batch_dict['depth_preds'] = []
        batch_dict['depth_volumes'] = []
        batch_dict['depth_samples'] = self.depth.clone().detach().cuda()
        if self.front_surface_depth:
            batch_dict = self.forward_front_surface_depth_head(batch_dict, Voxel_for_geo, calibs_Proj)
        if pred_depth_lss is not None:
            batch_dict['depth_preds'].append(pred_depth_lss)
            batch_dict['depth_volumes'].append(depth_lss_volume_full)

        # batch_dict['time_backbone_3d'] = time.time()-start_time
        return batch_dict