import torch
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.models.detectors_lidar.lidar_detector3d_template import Detector3DTemplate

from .. import backbones_3d_stereo
from pcdet.models.backbones_2d import fuser
from pcdet.utils.common_utils import T
from pcdet.models.dense_heads.depth_loss_head import DepthLossHead
from pcdet.models import backbones_2d

class DSGNVoxelrcnnFusion(Detector3DTemplate):
    """Stereo-image / LiDAR fusion detector (DSGN stereo branch + Voxel-RCNN heads).

    Builds a LiDAR voxel branch (vfe -> backbone_3d -> map_to_bev_module) and a
    stereo image branch (image_backbone -> image_backbone_2d), fuses the two BEV
    feature maps via ``fuser``, then runs the shared 2D backbone and detection
    heads, with an auxiliary stereo depth-loss head for supervision.
    """

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Registered as a buffer (not a parameter) so the step counter is
        # saved/restored with checkpoints but never optimized.
        self.register_buffer('global_step', torch.LongTensor(1).zero_())

        # Build order matters: BEV features from both branches must exist
        # before the fuser, which must run before the 2D backbone and heads.
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module',
            'image_backbone', 'image_backbone_2d', 'fuser',
            'backbone_2d', 'dense_head', 'point_head', 'roi_head', 'depth_loss_head'
        ]
        self.module_list = self.build_networks()

    def build_networks(self, trt=False):
        """Instantiate every sub-module listed in ``self.module_topology``.

        Args:
            trt: if True, builders may swap in TensorRT-specific variants.

        Returns:
            List of the built modules in topology order (builders that return
            None are skipped from the list but still registered as attributes).
        """
        model_info_dict = {
            'module_list': [],
            'fixed_module_list': [],
            # Stereo-specific geometry falls back to the LiDAR geometry when the
            # dataset does not define a separate stereo configuration.
            'stereo_grid_size': getattr(self.dataset, 'stereo_grid_size', self.dataset.grid_size),
            'grid_size': self.dataset.grid_size,
            'point_cloud_range': self.dataset.point_cloud_range,
            'stereo_voxel_size': getattr(self.dataset, 'stereo_voxel_size', self.dataset.voxel_size),
            'voxel_size': self.dataset.voxel_size,
            'boxes_gt_in_cam2_view': getattr(self.dataset, 'boxes_gt_in_cam2_view', False),
            'stereo_point_cloud_range': self.dataset.stereo_point_cloud_range,
            'max_crop_shape': getattr(self.dataset, 'max_crop_shape', None),
            'trt': trt,
            'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
            'num_point_features': self.dataset.point_feature_encoder.num_point_features,
        }

        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, 'build_%s' % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        self.model_info_dict = model_info_dict

        # Optionally freeze everything except the RoI head, e.g. for
        # second-stage fine-tuning on top of a frozen RPN.
        if self.model_cfg.get('FREEZE_RPN', False):
            for module_name in self.module_topology:
                module = getattr(self, module_name, None)
                if module is None or module_name == 'roi_head':
                    continue
                for param in module.parameters():
                    param.requires_grad_(False)

        return model_info_dict['module_list']

    def build_neck(self, model_info_dict):
        """Build the optional image neck module from the NECK config.

        NOTE(review): ``img_neck`` is never imported in this file, so this
        method raises NameError if a NECK config is ever supplied. It is also
        absent from ``self.module_topology``, so it appears to be dead code —
        confirm and either remove it or restore the missing import.
        """
        if self.model_cfg.get('NECK', None) is None:
            return None, model_info_dict
        neck_module = img_neck.__all__[self.model_cfg.NECK.NAME](
            model_cfg=self.model_cfg.NECK
        )
        model_info_dict['module_list'].append(neck_module)

        return neck_module, model_info_dict

    def build_image_backbone(self, model_info_dict):
        """Build the stereo image 3D backbone from the IMAGE_BACKBONE config."""
        if self.model_cfg.get('IMAGE_BACKBONE', None) is None:
            return None, model_info_dict
        if model_info_dict.get('trt', False):
            # NOTE(review): this mutates BACKBONE_3D.NAME (not IMAGE_BACKBONE.NAME)
            # and appends 'TRT' again on every call — confirm this is intended.
            self.model_cfg.BACKBONE_3D.NAME += 'TRT'

        backbone_3d_module = backbones_3d_stereo.__all__[self.model_cfg.IMAGE_BACKBONE.NAME](
            model_cfg=self.model_cfg.IMAGE_BACKBONE,
            class_names=self.class_names,
            grid_size=model_info_dict['stereo_grid_size'],
            voxel_size=model_info_dict['stereo_voxel_size'],
            point_cloud_range=model_info_dict['point_cloud_range'],
            boxes_gt_in_cam2_view=model_info_dict['boxes_gt_in_cam2_view'],
            stereo_point_cloud_range=model_info_dict['stereo_point_cloud_range'],
            max_crop_shape=model_info_dict['max_crop_shape'])
        model_info_dict['module_list'].append(backbone_3d_module)
        return backbone_3d_module, model_info_dict

    def build_image_backbone_2d(self, model_info_dict):
        """Build the 2D BEV backbone for the image branch."""
        if self.model_cfg.get('IMAGE_BACKBONE_2D', None) is None:
            return None, model_info_dict

        backbone_2d_module = backbones_2d.__all__[self.model_cfg.IMAGE_BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.IMAGE_BACKBONE_2D,
            input_channels=self.model_cfg.IMAGE_BACKBONE_2D.num_bev_features
        )
        model_info_dict['module_list'].append(backbone_2d_module)
        model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict

    def build_depth_loss_head(self, model_info_dict):
        """Build the auxiliary depth supervision head.

        Skipped when there is no DEPTH_LOSS_HEAD config or the plane-sweep
        volume is dropped, unless ``front_surface_depth`` forces depth
        supervision anyway.
        """
        # front_surface_depth requires the plane-sweep volume (or its loss) to
        # be dropped, otherwise the two depth supervisions would conflict.
        assert not self.model_cfg.BACKBONE_3D.get('front_surface_depth', False) or \
                (self.model_cfg.BACKBONE_3D.get('drop_psv_loss', False) or self.model_cfg.BACKBONE_3D.get('drop_psv', False))
        if (self.model_cfg.get('DEPTH_LOSS_HEAD', None) is None or self.model_cfg.BACKBONE_3D.get('drop_psv', False)) and \
            not self.model_cfg.BACKBONE_3D.get('front_surface_depth', False):
            return None, model_info_dict
        depth_loss_head = DepthLossHead(
            model_cfg=self.model_cfg.DEPTH_LOSS_HEAD,
            # BUG FIX: the original called getattr() on a dict, which never
            # finds a key and therefore ALWAYS returned the fallback; use
            # dict.get() so the stereo range set in build_networks() is used.
            point_cloud_range=model_info_dict.get('stereo_point_cloud_range', model_info_dict['point_cloud_range']),
        )
        model_info_dict['module_list'].append(depth_loss_head)
        return depth_loss_head, model_info_dict

    def build_fuser(self, model_info_dict):
        """Build the BEV feature fuser combining the LiDAR and image branches."""
        if self.model_cfg.get('FUSER', None) is None:
            return None, model_info_dict

        fuser_module = fuser.__all__[self.model_cfg.FUSER.NAME](
            model_cfg=self.model_cfg.FUSER
        )
        model_info_dict['module_list'].append(fuser_module)
        model_info_dict['num_bev_features'] = self.model_cfg.FUSER.OUT_CHANNEL
        return fuser_module, model_info_dict

    def forward(self, batch_dict):
        """Run all modules over ``batch_dict``.

        Returns:
            - RETURN_BATCH_DICT mode (teacher/distillation): the batch_dict
              itself, with key feature maps moved under 'lidar_outputs'.
            - training: (ret_dict with 'loss', tb_dict, disp_dict).
            - eval: (pred_dicts, ret_dicts) from post-processing.
        """
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.model_cfg.get('RETURN_BATCH_DICT', False):
            # Drop bulky intermediates the downstream consumer never reads.
            keys_to_remove = ['sem_features',
                'rpn_feature',
                'valids',
                'norm_coord_imgs',
                'volume_features_nopool',
                ]
            for k in keys_to_remove:
                batch_dict.pop(k, None)
            keys_to_keep = ['spatial_features_stride',
                            'spatial_features',
                            'spatial_features_2d',
                            'volume_features',
                            'batch_cls_preds',
                            'batch_box_preds']
            batch_dict['lidar_outputs'] = {} # teacher outputs
            for k in keys_to_keep:
                if k in batch_dict:
                    batch_dict['lidar_outputs'][k] = batch_dict.pop(k)

            return batch_dict

        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss(batch_dict)

            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict
        else:
            pred_dicts, ret_dicts = self.post_processing(batch_dict)
            # Surface per-batch depth-error diagnostics in the returned dict.
            for k in batch_dict:
                if k.startswith('depth_error_'):
                    if isinstance(batch_dict[k], list):
                        ret_dicts[k] = batch_dict[k]
                    elif len(batch_dict[k].shape) == 0:
                        ret_dicts[k] = batch_dict[k].item()

            # Attach 2D box predictions (one dict per sample) when present.
            if getattr(self, 'dense_head_2d', None) and 'boxes_2d_pred' in batch_dict:
                assert len(pred_dicts) == len(batch_dict['boxes_2d_pred'])
                for pred_dict, pred_2d_dict in zip(pred_dicts, batch_dict['boxes_2d_pred']):
                    pred_dict['pred_boxes_2d'] = pred_2d_dict['pred_boxes_2d']
                    pred_dict['pred_scores_2d'] = pred_2d_dict['pred_scores_2d']
                    pred_dict['pred_labels_2d'] = pred_2d_dict['pred_labels_2d']
            pred_dicts[0]['batch_dict'] = batch_dict

            return pred_dicts, ret_dicts

    def get_training_loss(self, batch_dict):
        """Accumulate the losses of every active head.

        Returns:
            (total loss, tensorboard scalar dict, display dict)
        """
        disp_dict = {}

        loss = 0.
        tb_dict = {}
        if getattr(self, 'dense_head', None):
            loss_rpn, tb_dict = self.dense_head.get_loss()
            loss += loss_rpn
            tb_dict.update(loss_rpn=loss_rpn.item())

        if (not self.model_cfg.BACKBONE_3D.get('drop_psv', False) and not self.model_cfg.BACKBONE_3D.get('drop_psv_loss', False)) or self.model_cfg.BACKBONE_3D.get('front_surface_depth', False):
            loss_depth, tb_dict = self.depth_loss_head.get_loss(batch_dict, tb_dict)
            tb_dict.update(loss_depth=loss_depth.item())
            if torch.isnan(loss_depth):
                # Zero-valued term keeps the graph connected to depth_preds so
                # gradients/DDP bookkeeping stay consistent despite the NaN.
                loss += sum([i.sum() for i in batch_dict['depth_preds']]) * 0.
                print('-------------- NaN depth loss')
            else:
                loss += loss_depth

        if self.model_cfg.get('DRLOC_LOSS', False):
            loss_drloc, tb_dict = self.backbone_3d.get_loss(batch_dict, tb_dict)
            tb_dict.update(loss_drloc=loss_drloc.item())
            if torch.isnan(loss_drloc):
                print('-------------- NaN drloc loss')
            else:
                loss += loss_drloc

        if self.model_cfg.get('VOXEL_LOSS_HEAD', None):
            loss_voxel, tb_dict = self.voxel_loss_head.get_loss(batch_dict, tb_dict)
            tb_dict.update(loss_voxel=loss_voxel.item())
            if torch.isnan(loss_voxel):
                loss += batch_dict['voxel_occupancy'].sum() * 0.
                # BUG FIX: message previously said 'NaN depth loss' (copy-paste).
                print('-------------- NaN voxel loss')
            else:
                loss += loss_voxel

        if self.model_cfg.get('RANGE_LOSS_HEAD', None):
            loss_range, tb_dict = self.range_loss_head.get_loss(batch_dict, tb_dict)
            tb_dict.update(loss_range=loss_range.item())
            if torch.isnan(loss_range):
                loss += (sum([i.sum() for i in batch_dict['range_voxel_occupancy']]) + batch_dict['voxel_occupancy'].sum()) * 0.
                # BUG FIX: message previously said 'NaN depth loss' (copy-paste).
                print('-------------- NaN range loss')
            else:
                loss += loss_range

        if getattr(self, 'dense_head_2d', None):
            loss_rpn_2d, tb_dict = self.dense_head_2d.get_loss(batch_dict, tb_dict)
            tb_dict['loss_rpn2d'] = loss_rpn_2d.item()
            loss += loss_rpn_2d

        if self.model_cfg.get('ROI_HEAD', None):
            loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
            tb_dict['loss_rcnn'] = loss_rcnn.item()
            loss += loss_rcnn

        return loss, tb_dict, disp_dict

    def get_iou_map(self, batch_dict):
        """Compute per-sample 3D IoU between predicted and ground-truth boxes.

        Returns:
            List of length batch_size, each entry a [N_pred, N_gt] numpy IoU
            matrix, or None for samples without ground-truth boxes.
        """
        batch_size = batch_dict['batch_size']
        iou_map_results = []

        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Stacked predictions [N_total, 7+]: select this sample by mask.
                assert len(batch_dict['batch_box_preds'].shape) == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                # Batched predictions [B, N, 7+]: index the batch dim directly.
                assert len(batch_dict['batch_box_preds'].shape) == 3
                batch_mask = index

            box_preds = batch_dict['batch_box_preds'][batch_mask]  # [N_anchors, 7]
            gt_boxes = batch_dict['gt_boxes'][index]

            if gt_boxes.shape[0] <= 0:
                iou_map_results.append(None)
            else:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(
                    box_preds[:, 0:7], gt_boxes[:, 0:7])
                iou_map_results.append(iou3d_roi.detach().cpu().numpy())

        return iou_map_results