import copy
from collections import defaultdict
import logging
import mindspore
import numpy as np2
from mindspore import nn, ops, Tensor, numpy
from mind3d.utils.sim_nms_gpu import nms_gpu
from mind3d.models.losses.sim_loss import FastFocalLoss, RegLoss


class SepHead(nn.Cell):
    """Separate output heads for a single detection task.

    For every entry in ``heads`` a small convolution stack is built that maps
    the shared feature map to that head's output channels (e.g. 'hm', 'reg',
    'height', 'dim', 'rot', 'vel').

    Args:
        in_channels (int): Channels of the input feature map.
        heads (dict): Mapping ``head_name -> (out_channels, num_conv)``.
        head_conv (int): Hidden channels of the intermediate convolutions.
        final_kernel (int): Kernel size of every convolution in a head.
        bn (bool): Insert BatchNorm2d between conv and ReLU when True.
        init_bias (float): Bias initialization of the final heatmap conv;
            a large negative value keeps initial sigmoid heatmap scores low
            (focal-loss prior, as in CenterPoint).
    """

    def __init__(
            self,
            in_channels,
            heads,
            head_conv=64,
            final_kernel=1,
            bn=False,
            init_bias=-2.19,
    ):
        super(SepHead, self).__init__()

        self.heads = heads
        self.tasks1 = nn.CellList([])
        self.names1 = []

        # NOTE(review): every intermediate conv takes `in_channels` as its
        # input channel count; this is only correct when
        # in_channels == head_conv or num_conv <= 2 — confirm with configs.
        for head in self.heads.keys():
            classes, num_conv = self.heads[head]
            fc = nn.SequentialCell()
            if 'hm' in head:
                for _ in range(num_conv - 1):
                    fc.append(nn.Conv2d(in_channels, head_conv,
                                        kernel_size=final_kernel, padding=final_kernel // 2,
                                        pad_mode='pad', has_bias=True, weight_init="he_normal"))
                    if bn:
                        fc.append(nn.BatchNorm2d(head_conv, momentum=0.90))
                    fc.append(nn.ReLU())
                # BUG FIX: init_bias was accepted but never applied; initialize
                # the final heatmap conv bias with it so the sigmoid output
                # starts near zero, matching the reference CenterPoint head.
                fc.append(nn.Conv2d(head_conv, classes,
                                    kernel_size=final_kernel, padding=final_kernel // 2,
                                    pad_mode='pad', has_bias=True, weight_init="he_normal",
                                    bias_init=float(init_bias)))
            else:
                for _ in range(num_conv - 1):
                    fc.append(nn.Conv2d(in_channels, head_conv,
                                        kernel_size=final_kernel, padding=final_kernel // 2,
                                        weight_init='HeUniform', pad_mode='pad', has_bias=True))
                    if bn:
                        fc.append(nn.BatchNorm2d(head_conv, momentum=0.90))
                    fc.append(nn.ReLU())
                fc.append(nn.Conv2d(head_conv, classes,
                                    kernel_size=final_kernel, padding=final_kernel // 2,
                                    weight_init='HeUniform', pad_mode='pad', has_bias=True))
            self.names1.append(head)
            # Each head is stored as a named attribute so construct() can
            # dispatch explicitly (explicit branches instead of getattr).
            if 'hm' in head:
                self.hm = fc
            elif "vel" in head:
                self.vel = fc
            elif "rot" in head:
                self.rot = fc
            elif "dim" in head:
                self.dim = fc
            elif "height" in head:
                self.height = fc
            elif "reg" in head:
                self.reg = fc

    def construct(self, x):
        """Run every head on ``x``; returns ``{head_name: output_map}``."""
        ret_dict = {}
        for name in self.names1:
            if name == 'hm':
                ret_dict[name] = self.hm(x)
            elif name == "vel":
                ret_dict[name] = self.vel(x)
            elif name == "rot":
                ret_dict[name] = self.rot(x)
            elif name == "dim":
                ret_dict[name] = self.dim(x)
            elif name == "height":
                ret_dict[name] = self.height(x)
            elif name == "reg":
                ret_dict[name] = self.reg(x)

        return ret_dict


class CenterHeadV2(nn.Cell):
    """SimTrack / CenterPoint-style detection head.

    A shared 3x3 convolution projects the backbone feature map, then one
    :class:`SepHead` per task predicts a class heatmap plus box-regression
    maps ('reg', 'height', 'dim', 'rot', 'vel').

    Args:
        in_channels (int): Channel count of the input feature map.
        tasks (list[dict]): One dict per task, each providing "class_names".
        weight (float): Weight of the localization loss w.r.t. the heatmap loss.
        code_weights: Per-element weights applied to the box regression loss.
        common_heads (dict): Head spec shared by all tasks,
            mapping ``head_name -> (out_channels, num_conv)``.
        logger (logging.Logger, optional): Defaults to a "CenterHead" logger.
        init_bias (float): Initial bias of the heatmap output convolution.
        share_conv_channel (int): Channel count of the shared convolution.
        num_hm_conv (int): Number of convolutions in the heatmap head.
    """

    def __init__(
            self,
            in_channels,
            tasks,
            weight,
            code_weights,
            common_heads,
            logger=None,
            init_bias=-2.19,
            share_conv_channel=64,
            num_hm_conv=2,
    ):
        super(CenterHeadV2, self).__init__()
        num_classes = [len(t["class_names"]) for t in tasks]
        self.class_names = [t["class_names"] for t in tasks]
        self.code_weights = code_weights
        self.weight = weight  # balance between heatmap loss and localization loss

        self.in_channels = in_channels
        self.num_classes = num_classes

        self.crit = FastFocalLoss()
        self.crit_reg = RegLoss()

        if not logger:
            logger = logging.getLogger("CenterHead")
        self.logger = logger

        logger.info(f"num_classes: {num_classes}")

        # A convolution shared by every task head.
        self.shared_conv = nn.SequentialCell(
            nn.Conv2d(in_channels, share_conv_channel,
                      kernel_size=3, padding=1, pad_mode='pad', has_bias=True,
                      weight_init="he_normal"),
            nn.BatchNorm2d(share_conv_channel),
            nn.ReLU()
        )

        self.tasks = nn.CellList([])
        for num_cls in num_classes:
            # BUG FIX: deep-copy so that updating the 'hm' entry per task does
            # not mutate the caller's common_heads dict (it was previously
            # shared, leaking the last task's 'hm' spec back to the caller).
            heads = copy.deepcopy(common_heads)
            heads.update(dict(hm=(num_cls, num_hm_conv)))
            self.tasks.append(
                SepHead(share_conv_channel, heads, bn=True, init_bias=init_bias, final_kernel=3)
            )

        logger.info("Finish CenterHead Initialization")

    def construct(self, x):
        """Apply the shared conv, then every task head.

        Returns:
            list[dict]: One dict of prediction maps per task.
        """
        x = self.shared_conv(x)
        ret_dicts = []
        for task in self.tasks:
            ret_dicts.append(task(x))
        return ret_dicts

    def _sigmoid(self, x):
        """Sigmoid clamped to [1e-4, 1 - 1e-4] so the focal loss stays finite."""
        min_value = Tensor(1e-4, mindspore.float32)
        max_value = Tensor(1 - 1e-4, mindspore.float32)
        sigmoid = nn.Sigmoid()
        return ops.clip_by_value(sigmoid(x), min_value, max_value)

    def loss(self, example, preds_dicts):
        """Compute focal heatmap loss plus weighted regression loss per task.

        Args:
            example (dict): Ground-truth targets ('hm<i>', 'ind', 'mask',
                'cat', 'anno_box'; the latter four indexed by task id).
            preds_dicts (list[dict]): Per-task predictions from construct().

        Returns:
            defaultdict(list): key -> list of per-task values for 'loss',
            'hm_loss', 'loc_loss', 'loc_loss_elem' and 'num_positive'.
        """
        rets = []

        for task_id, preds_dict in enumerate(preds_dicts):
            # Heatmap focal loss on the clamped sigmoid output.
            preds_dict['hm'] = self._sigmoid(preds_dict['hm'])
            hm_idx = 'hm' + str(task_id)
            hm_loss = self.crit(preds_dict['hm'], example[hm_idx],
                                example['ind'][task_id, :, :],
                                example['mask'][task_id, :, :],
                                example['cat'][task_id, :, :])

            target_box = example['anno_box'][task_id, :, :, :]
            # Reconstruct the full anno_box from the separate regression heads.
            cat = ops.Concat(axis=1)
            preds_dict['anno_box'] = cat((preds_dict['reg'], preds_dict['height'], preds_dict['dim'],
                                          preds_dict['vel'], preds_dict['rot']))

            # Regression loss for offset, height, dimension, velocity, rotation.
            box_loss = self.crit_reg(preds_dict['anno_box'], example['mask'][task_id, :, :],
                                     example['ind'][task_id, :, :], target_box)
            loc_loss = (box_loss * self.code_weights).sum()

            loss = hm_loss + self.weight * loc_loss

            rets.append({'loss': loss, 'hm_loss': hm_loss, 'loc_loss': loc_loss,
                         'loc_loss_elem': box_loss,
                         'num_positive': example['mask'][task_id, :, :].sum()})

        # Convert list-of-dicts into dict-of-lists (key -> per-task values).
        rets_merged = defaultdict(list)
        for ret in rets:
            for k, v in ret.items():
                rets_merged[k].append(v)

        return rets_merged

    def predict(self, example, preds_dicts, test_cfg):
        """Decode predictions into boxes, run NMS, and merge all task branches.

        Returns:
            list[dict]: Per-sample dicts with 'box3d_lidar', 'scores',
            'label_preds' (labels offset into a global class space).
        """
        rets = []

        post_center_range = test_cfg['post_center_limit_range']
        if len(post_center_range) > 0:
            post_center_range = Tensor(post_center_range)

        for preds_dict in preds_dicts:
            # Convert N C H W to N H W C.
            for key, val in preds_dict.items():
                preds_dict[key] = val.transpose((0, 2, 3, 1))

            sigmoid = ops.Sigmoid()
            exp = ops.Exp()
            atan2 = ops.Atan2()
            batch_hm = sigmoid(preds_dict['hm'])
            batch_dim = exp(preds_dict['dim'])  # dims are regressed in log space
            batch_rots = preds_dict['rot'][..., 0:1]
            batch_rotc = preds_dict['rot'][..., 1:2]
            batch_reg = preds_dict['reg']
            batch_hei = preds_dict['height']

            # Recover yaw from its (sin, cos) encoding.
            batch_rot = atan2(batch_rots, batch_rotc)

            batch, H, W, num_cls = batch_hm.shape

            reshape = ops.Reshape()
            batch_reg = reshape(batch_reg, (batch, H * W, 2))
            batch_hei = reshape(batch_hei, (batch, H * W, 1))
            batch_rot = reshape(batch_rot, (batch, H * W, 1))
            batch_dim = reshape(batch_dim, (batch, H * W, 3))
            batch_hm = reshape(batch_hm, (batch, H * W, num_cls))

            meshgrid = ops.Meshgrid(indexing="ij")
            cat = ops.Concat(2)

            # Grid-cell indices + predicted sub-cell offsets -> world coords.
            ys, xs = meshgrid((numpy.arange(0, H), numpy.arange(0, W)))
            ys = numpy.tile(ys.view(1, H, W), (batch, 1, 1))
            xs = numpy.tile(xs.view(1, H, W), (batch, 1, 1))
            xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]
            ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]
            xs = xs * test_cfg['out_size_factor'] * test_cfg['voxel_size'][0] + test_cfg['pc_range'][0]
            ys = ys * test_cfg['out_size_factor'] * test_cfg['voxel_size'][1] + test_cfg['pc_range'][1]

            batch_vel = reshape(preds_dict['vel'], (batch, H * W, 2))
            batch_box_preds = cat([xs, ys, batch_hei, batch_dim, batch_vel, batch_rot])

            rets.append(self.post_processing(batch_box_preds, batch_hm, test_cfg, post_center_range))

        # Merge the per-task branch results for each sample.
        num_samples = len(rets[0])

        ret_list = []
        concat = ops.Concat()
        for i in range(num_samples):
            ret = {}
            for k in rets[0][i].keys():
                parts = []
                if k in ["box3d_lidar", "scores"]:
                    for task_ret in rets:
                        # str(tensor) == '[]' is the project's empty-tensor check.
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    # Guard against an all-empty branch list (consistent with
                    # predict_tracking); concat of an empty list would raise.
                    if parts:
                        ret[k] = concat(parts)
                elif k in ["label_preds"]:
                    # Offset each task's labels into a global label space.
                    flag = 0
                    for j, num_class in enumerate(self.num_classes):
                        rets[j][i][k] += flag
                        flag += num_class
                    for task_ret in rets:
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    if parts:
                        ret[k] = concat(parts)
            if not ret:
                ret = rets[0][0]
            ret_list.append(ret)

        return ret_list

    def post_processing(self, batch_box_preds, batch_hm, test_cfg, post_center_range):
        """Score-threshold, range-filter and NMS one batch of decoded boxes.

        Returns:
            list[dict]: Per-sample dicts with 'box3d_lidar', 'scores',
            'label_preds'.
        """
        batch_size = len(batch_hm)

        prediction_dicts = []
        for i in range(batch_size):
            box_preds = batch_box_preds[i]
            hm_preds = batch_hm[i]

            # Best class and its score at every location.
            labels, scores = ops.ArgMaxWithValue(axis=-1)(hm_preds)

            score_mask = scores > test_cfg['score_threshold']
            # Keep boxes whose centers fall inside the post-processing range.
            distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1).asnumpy() \
                            & (box_preds[..., :3] <= post_center_range[3:]).all(1).asnumpy()

            mask = distance_mask & score_mask.asnumpy()

            mask = (Tensor(mask, mindspore.bool_) * 1).nonzero().squeeze(axis=1)
            box_preds = box_preds.gather(mask, axis=0)
            scores = scores.gather(mask, axis=0)
            labels = labels.gather(mask, axis=0)

            if str(box_preds.asnumpy()) != '[]':  # any boxes survived the masks
                # x, y, z, dx, dy, dz, yaw for BEV rotated NMS.
                boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]
                selected = nms_gpu(boxes_for_nms, scores,
                                   thresh=test_cfg['nms']['nms_iou_threshold'],
                                   pre_maxsize=test_cfg['nms']['nms_pre_max_size'],
                                   post_max_size=test_cfg['nms']['nms_post_max_size'])
            else:
                # Empty selection with an integer dtype suitable for gather().
                selected = scores.astype('int32')

            selected_boxes = box_preds.gather(selected, axis=0)
            selected_scores = scores.gather(selected, axis=0)
            selected_labels = labels.gather(selected, axis=0)

            prediction_dicts.append({
                'box3d_lidar': selected_boxes,
                'scores': selected_scores,
                'label_preds': selected_labels
            })

        return prediction_dicts

    def predict_tracking(self, example, preds_dicts, test_cfg, **kwargs):
        """Decode, NMS and merge results; additionally associate detections
        with previous-frame tracks when 'prev_track_id' / 'prev_hm' are given.

        Returns:
            list | (list, list): Detection results, plus tracking results
            when previous-frame information was supplied.
        """
        rets = []
        metas = []
        post_center_range = test_cfg['post_center_limit_range']
        if len(post_center_range) > 0:
            post_center_range = Tensor(post_center_range, mindspore.float32)

        prev_track_id = kwargs.get('prev_track_id', None)

        if prev_track_id is not None:
            track_rets = []
            prev_hm = kwargs['prev_hm']

        for task_id, preds_dict in enumerate(preds_dicts):
            new_obj = [{}]
            # Convert N C H W to N H W C.
            for key, val in preds_dict.items():
                preds_dict[key] = val.transpose((0, 2, 3, 1))

            meta_list = example["token"]

            sigmoid = ops.Sigmoid()
            exp = ops.Exp()
            atan2 = ops.Atan2()
            batch_hm = sigmoid(preds_dict['hm'])
            batch_dim = exp(preds_dict['dim'])
            batch_rots = preds_dict['rot'][..., 0:1]
            batch_rotc = preds_dict['rot'][..., 1:2]
            batch_reg = preds_dict['reg']
            batch_hei = preds_dict['height']

            batch_rot = atan2(batch_rots, batch_rotc)

            batch, H, W, num_cls = batch_hm.shape

            reshape = ops.Reshape()
            batch_reg = reshape(batch_reg, (batch, H * W, 2))
            batch_hei = reshape(batch_hei, (batch, H * W, 1))
            batch_rot = reshape(batch_rot, (batch, H * W, 1))
            batch_dim = reshape(batch_dim, (batch, H * W, 3))
            batch_hm = reshape(batch_hm, (batch, H * W, num_cls))

            meshgrid = ops.Meshgrid(indexing="ij")
            expand_dims = ops.ExpandDims()
            cat = ops.Concat(2)
            ys, xs = meshgrid((numpy.arange(0, H), numpy.arange(0, W)))
            ys = expand_dims(ys, 0)
            xs = expand_dims(xs, 0)
            xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]
            ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]

            xs = xs * test_cfg['out_size_factor'] * test_cfg['voxel_size'][0] + test_cfg['pc_range'][0]
            ys = ys * test_cfg['out_size_factor'] * test_cfg['voxel_size'][1] + test_cfg['pc_range'][1]

            batch_vel = reshape(preds_dict['vel'], (batch, H * W, 2))
            batch_box_preds = cat([xs, ys, batch_hei, batch_dim, batch_vel, batch_rot])

            metas.append(meta_list)

            if prev_track_id is not None:
                # Average current and previous heatmaps for track association.
                tracking_batch_hm = (batch_hm + prev_hm[task_id]) / 2.0
                tracking = self.post_processing_tracking(batch_box_preds, tracking_batch_hm,
                                                         prev_track_id[task_id],
                                                         test_cfg, post_center_range)

                # Split out new objects (tracking_id == -1) ...
                for bit in range(len(tracking)):
                    if str(tracking[bit]['tracking_id']) == '[]':
                        new_obj = tracking
                    else:
                        cond = ((tracking[bit]['tracking_id'] == -1) * 1.0).nonzero().squeeze(axis=1)
                        for tk in tracking[0].keys():
                            if tk != 'tracking_id':
                                new_obj[bit][tk] = tracking[bit][tk].gather(cond, axis=0)

                # ... and keep only matched tracks (tracking_id != -1).
                for bit in range(len(tracking)):
                    if str(tracking[bit]['tracking_id']) != '[]':
                        cond = ((tracking[bit]['tracking_id'] != -1) * 1.0).nonzero().squeeze(axis=1)
                        for tk in tracking[0].keys():
                            tracking[bit][tk] = tracking[bit][tk].gather(cond, axis=0)

                track_rets.append(tracking)

            else:
                new_obj = self.post_processing(batch_box_preds, batch_hm, test_cfg, post_center_range)

            rets.append(new_obj)

        # Merge the per-task branch results for each sample.
        ret_list = []
        concat = ops.Concat()
        num_samples = len(rets[0])
        for i in range(num_samples):
            ret = {}
            for k in rets[0][i].keys():
                parts = []
                if k in ["box3d_lidar", "scores", "selected_id"]:
                    for task_ret in rets:
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    if parts:
                        ret[k] = concat(parts)
                elif k in ["label_preds"]:
                    flag = 0
                    for j, num_class in enumerate(self.num_classes):
                        rets[j][i][k] += flag
                        flag += num_class
                    for task_ret in rets:
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    if parts:
                        ret[k] = concat(parts)
            if not ret:
                # Nothing detected in any branch: emit a single dummy box.
                ret['box3d_lidar'] = ops.zeros((1, 9), mindspore.float32)
                ret['scores'] = ops.zeros((1), mindspore.float32)
                ret['label_preds'] = ops.zeros((1), mindspore.int64)
            ret['token'] = metas[0]
            ret_list.append(ret)

        if prev_track_id is None:
            return ret_list

        track_rets_list = []
        num_tracks = len(track_rets[0])
        for i in range(num_tracks):
            ret = {}
            for k in ['box3d_lidar', 'scores', 'label_preds', 'tracking_id']:
                parts = []
                if k in ["box3d_lidar", "scores", 'tracking_id']:
                    for task_ret in track_rets:
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    if parts:
                        ret[k] = concat(parts)
                elif k in ["label_preds"]:
                    flag = 0
                    for j, num_class in enumerate(self.num_classes):
                        track_rets[j][i][k] += flag
                        flag += num_class
                    for task_ret in track_rets:
                        if str(task_ret[i][k]) != '[]':
                            parts.append(task_ret[i][k])
                    if parts:
                        ret[k] = concat(parts)

            if not ret:
                ret['box3d_lidar'] = ops.zeros((1, 9), mindspore.float32)
                ret['scores'] = ops.zeros((1), mindspore.float32)
                ret['label_preds'] = ops.zeros((1), mindspore.int64)
                ret['tracking_id'] = ops.zeros((1), mindspore.int64)
            ret['token'] = metas[0]
            track_rets_list.append(ret)

        return ret_list, track_rets_list

    def post_processing_tracking(self, batch_box_preds, batch_hm, prev_tracking_id, test_cfg, post_center_range):
        """Like post_processing(), but also carries the previous-frame track id
        of the winning class at every kept location.

        Returns:
            list[dict]: Per-sample dicts with 'box3d_lidar', 'scores',
            'label_preds' and 'tracking_id'.
        """
        batch_size = len(batch_hm)

        prediction_dicts = []
        expand_dims = ops.ExpandDims()
        squeeze = ops.Squeeze(-1)
        for i in range(batch_size):
            box_preds = batch_box_preds[i].copy()
            hm_preds = batch_hm[i].copy()
            prev_id = prev_tracking_id[i]
            labels, scores = ops.ArgMaxWithValue(axis=-1)(hm_preds)
            # Pick the previous track id of the predicted class at each cell.
            prev_id = squeeze(ops.GatherD()(prev_id, 1, expand_dims(labels, -1)))

            score_mask = scores > test_cfg['score_threshold']
            distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1).asnumpy() \
                            & (box_preds[..., :3] <= post_center_range[3:]).all(1).asnumpy()

            mask = distance_mask & score_mask.asnumpy()
            mask = (Tensor(mask, mindspore.bool_) * 1).nonzero().squeeze(axis=1)

            box_preds = box_preds.gather(mask, axis=0)
            scores = scores.gather(mask, axis=0)
            labels = labels.gather(mask, axis=0)
            prev_id = prev_id.gather(mask, axis=0)

            if str(box_preds.asnumpy()) != '[]':
                boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]

                selected = nms_gpu(boxes_for_nms, scores,
                                   thresh=test_cfg['nms']['nms_iou_threshold'],
                                   pre_maxsize=test_cfg['nms']['nms_pre_max_size'],
                                   post_max_size=test_cfg['nms']['nms_post_max_size'])
            else:
                selected = scores.astype('int32')

            selected_boxes = box_preds.gather(selected, axis=0)
            selected_scores = scores.gather(selected, axis=0)
            selected_labels = labels.gather(selected, axis=0)
            selected_id = prev_id.gather(selected, axis=0)

            # BUG FIX: this dict was previously built OUTSIDE the batch loop,
            # so only the last sample's detections were ever returned when
            # batch_size > 1 (and it raised NameError for batch_size == 0).
            prediction_dicts.append({
                'box3d_lidar': selected_boxes,
                'scores': selected_scores,
                'label_preds': selected_labels,
                "tracking_id": selected_id,
            })

        return prediction_dicts
