import torch
import torch.nn as nn
from mmcv.runner import force_fp32
from mmdet.models import DETECTORS
from mmdet3d.models.builder import build_head
import torch
from .bevdet import BEVDet4D, BEVDepth4D



@DETECTORS.register_module()
class BEVDepth4DOCC_E2E(BEVDepth4D):
    """BEVDepth4D variant wired for end-to-end planning.

    Builds a trajectory planner head (and an occupancy head) on top of the
    BEVDepth4D temporal BEV backbone.  Optionally embeds a 9-dim ego-status
    (CAN-bus) vector and adds it to the BEV feature map before BEV encoding.

    Args:
        planner_head (dict): Config for the trajectory planner head.
        embed_dims (int): Channel width the ego-status MLP projects to;
            must match the fused BEV feature's channel count. Default: 160.
        occ_head (dict): Config for the occupancy head.
            NOTE(review): built here but never called in this file —
            presumably used by a subclass or a later training stage; confirm.
        with_ego_status (bool): If True, inject the embedded ego-status
            vector into the BEV feature in ``extract_img_feat``.
            Default: False.
    """

    def __init__(self,
                 planner_head=None,
                 embed_dims=160,
                 occ_head=None,
                 with_ego_status=False,
                 **kwargs):
        super(BEVDepth4DOCC_E2E, self).__init__(**kwargs)
        self.planner_head = build_head(planner_head)
        self.occ_head = build_head(occ_head)
        self.with_ego_status = with_ego_status
        self.embed_dims = embed_dims
        if self.with_ego_status:
            # Projects the 9-dim ego-status vector up to the BEV channel
            # width so it can be broadcast-added to the BEV feature map.
            self.can_bus_mlp = nn.Sequential(
                nn.Linear(9, self.embed_dims // 2),
                nn.ReLU(inplace=True),
                nn.Linear(self.embed_dims // 2, self.embed_dims),
                nn.ReLU(inplace=True),
                nn.LayerNorm(self.embed_dims)
            )

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Training forward pass; only the planner head contributes losses.

        The detection GT arguments (``gt_bboxes_3d`` etc.) are kept for
        interface compatibility with the mmdet3d trainer but are unused
        here.  Planner GT (``gt_ego_*``) arrives through ``kwargs``.

        Returns:
            dict: Loss terms produced by ``self.planner_head.loss``.
        """
        img_feats, pts_feats, depth = self.extract_feat(
            points, img=img_inputs, img_metas=img_metas, **kwargs)

        planner_outs = self.planner_head(
            img_feats=img_feats,
            gt_ego_lcf_feat=kwargs['gt_ego_lcf_feat'],
            gt_ego_fut_cmd=kwargs['gt_ego_fut_cmd'],
            img_metas=img_metas
        )
        loss_planner = self.planner_head.loss(
            gt_ego_fut_trajs=kwargs['gt_ego_fut_trajs'],
            gt_ego_fut_cmd=kwargs['gt_ego_fut_cmd'],
            gt_ego_fut_masks=kwargs['gt_ego_fut_masks'],
            planner_outs=planner_outs,
            img_metas=img_metas,
        )
        losses = dict()
        losses.update(loss_planner)

        return losses

    def extract_img_feat(self,
                         img,
                         img_metas,
                         pred_prev=False,
                         sequential=False,
                         **kwargs):
        """Extract a temporally fused BEV feature from multi-frame images.

        Gradients flow through the key (first) frame only; adjacent frames
        are processed under ``torch.no_grad`` (or zero-padded when
        ``self.with_prev`` is False).

        Returns:
            tuple: ``([bev_feature], key_frame_depth)`` in the default
            path; in the ``pred_prev`` path, the adjacent-frame features
            plus the metadata needed to re-align them later.
        """
        if sequential:
            return self.extract_img_feat_sequential(img, kwargs['feat_prev'])
        imgs, sensor2keyegos, ego2globals, intrins, post_rots, post_trans, \
            bda, _ = self.prepare_inputs(img)
        bev_feat_list = []
        depth_list = []
        key_frame = True  # back propagation for key frame only
        for frame_img, sensor2keyego, ego2global, intrin, post_rot, \
                post_tran in zip(imgs, sensor2keyegos, ego2globals, intrins,
                                 post_rots, post_trans):
            if key_frame or self.with_prev:
                if self.align_after_view_transfromation:
                    # Reuse the key frame's poses here; spatial alignment
                    # happens later via shift_feature.
                    sensor2keyego, ego2global = \
                        sensor2keyegos[0], ego2globals[0]
                # The depth-net MLP input is always built from the key
                # frame's poses.
                mlp_input = self.img_view_transformer.get_mlp_input(
                    sensor2keyegos[0], ego2globals[0], intrin, post_rot,
                    post_tran, bda)
                inputs_curr = (frame_img, sensor2keyego, ego2global, intrin,
                               post_rot, post_tran, bda, mlp_input)
                if key_frame:
                    bev_feat, depth = self.prepare_bev_feat(*inputs_curr)
                else:
                    with torch.no_grad():
                        bev_feat, depth = self.prepare_bev_feat(*inputs_curr)
            else:
                # No temporal history available: pad with zeros shaped like
                # the key frame's BEV feature.
                bev_feat = torch.zeros_like(bev_feat_list[0])
                depth = None
            bev_feat_list.append(bev_feat)
            depth_list.append(depth)
            key_frame = False
        if pred_prev:
            # Export adjacent-frame features plus the pose metadata needed
            # to warp them into a future key frame (temporal caching).
            assert self.align_after_view_transfromation
            assert sensor2keyegos[0].shape[0] == 1
            feat_prev = torch.cat(bev_feat_list[1:], dim=0)
            ego2globals_curr = \
                ego2globals[0].repeat(self.num_frame - 1, 1, 1, 1)
            sensor2keyegos_curr = \
                sensor2keyegos[0].repeat(self.num_frame - 1, 1, 1, 1)
            ego2globals_prev = torch.cat(ego2globals[1:], dim=0)
            sensor2keyegos_prev = torch.cat(sensor2keyegos[1:], dim=0)
            bda_curr = bda.repeat(self.num_frame - 1, 1, 1)
            return feat_prev, [imgs[0],
                               sensor2keyegos_curr, ego2globals_curr,
                               intrins[0],
                               sensor2keyegos_prev, ego2globals_prev,
                               post_rots[0], post_trans[0],
                               bda_curr]
        if self.align_after_view_transfromation:
            # Warp each adjacent frame's BEV feature into the key frame's
            # ego coordinates before channel-wise concatenation.
            for adj_id in range(1, self.num_frame):
                bev_feat_list[adj_id] = \
                    self.shift_feature(bev_feat_list[adj_id],
                                       [sensor2keyegos[0],
                                        sensor2keyegos[adj_id]],
                                       bda)
        bev_feat = torch.cat(bev_feat_list, dim=1)

        if self.with_ego_status and 'can_bus_info' in kwargs:
            can_bus_info = kwargs['can_bus_info']
            if not self.training:
                # NOTE(review): test-time inputs appear to arrive wrapped
                # in an extra list — confirm against the data pipeline.
                can_bus_info = can_bus_info[0]
            can_bus_info = torch.cat(can_bus_info)
            # Broadcast-add the embedded ego status over both spatial dims.
            can_bus_info_bevshape = self.can_bus_mlp(can_bus_info)
            can_bus_info_expand = can_bus_info_bevshape[:, :, None, None]
            bev_feat = bev_feat + can_bus_info_expand

        x = self.bev_encoder(bev_feat)
        # depth_list[0] is the key frame's predicted depth.
        return [x], depth_list[0]

    def simple_test(self,
                    points,
                    img_metas,
                    img=None,
                    rescale=False,
                    **kwargs):
        """Test function without augmentation (single-sample batches only).

        Returns:
            list[dict]: One result dict per sample, carrying the predicted
            ego trajectory and the sample index.
        """
        img_feats, pts_feats, depth = self.extract_feat(
            points, img=img, img_metas=img_metas, **kwargs)

        # GT kwargs below are indexed with [0], so only batch size 1 is
        # supported; fail fast on anything else.
        assert len(img_metas) == 1
        output_list = [dict() for _ in range(len(img_metas))]

        planner_outs = self.planner_head(
            img_feats=img_feats,
            gt_ego_lcf_feat=kwargs['gt_ego_lcf_feat'],
            gt_ego_fut_cmd=kwargs['gt_ego_fut_cmd'],
            img_metas=img_metas
        )
        pred_traj = self.planner_head.get_bboxes(
            planner_outs,
            img_metas,
            gt_ego_fut_trajs=kwargs['gt_ego_fut_trajs'][0],
            gt_ego_fut_cmd=kwargs['gt_ego_fut_cmd'][0],
            gt_ego_fut_masks=kwargs['gt_ego_fut_masks'][0],
            gt_fut_segmentations=kwargs['gt_fut_segmentations'][0],
            gt_fut_segmentations_plus=kwargs['gt_fut_segmentations_plus'][0],
            )
        pred_traj[0]['index'] = img_metas[0]['index']
        for i, result_dict in enumerate(output_list):
            result_dict['pred_ego_traj'] = pred_traj[i]
            result_dict['index'] = img_metas[i]['index']
        return output_list

    def forward_dummy(self,
                      points=None,
                      img_metas=None,
                      img_inputs=None,
                      **kwargs):
        """Dummy forward (e.g. FLOPs counting) through the pts bbox head."""
        img_feats, _, _ = self.extract_feat(
            points, img=img_inputs, img_metas=img_metas, **kwargs)
        assert self.with_pts_bbox
        outs = self.pts_bbox_head(img_feats)
        return outs
