'''
Created on Aug 1, 2018

@author: zyx
'''

import numpy as np
from lib.rpn.generate_anchor import generate_anchors
from lib.nms.nms import gpu_nms_wrapper
from lib.bbox.bbox_transform import bbox_pred, clip_boxes,bbox_overlaps,bbox_transform
import numpy.random as npr
import mxnet as mx
from lib.bbox.bbox_regression import expand_bbox_regression_targets
class Proposal(object):
    """RPN proposal layer: turn raw RPN outputs into object proposals (RoIs).

    For each image in the batch it enumerates shifted anchors over the
    feature map, applies the predicted bbox deltas, clips boxes to the
    image, drops boxes smaller than a minimum size, keeps the top-scoring
    pre-NMS candidates, runs GPU NMS, and pads the survivors so every image
    yields a fixed number of proposals.
    """
    def __init__(self,cfg,output_score = False):
        # cfg: config namespace; reads cfg.network.* anchor settings and
        # cfg.TRAIN.* NMS thresholds/counts (training-time values are used here).
        # output_score: stored but never read anywhere in this class --
        # NOTE(review): confirm whether any caller relies on it.
        super(Proposal, self).__init__()
        self._feat_stride = cfg.network.RPN_FEAT_STRIDE
        self._scales = np.array(tuple(cfg.network.ANCHOR_SCALES))
        self._ratios = np.array(tuple(cfg.network.ANCHOR_RATIOS))

        self._output_score = output_score
        self._rpn_pre_nms_top_n = cfg.TRAIN.RPN_PRE_NMS_TOP_N
        self._rpn_post_nms_top_n = cfg.TRAIN.RPN_POST_NMS_TOP_N
        self._threshold = cfg.TRAIN.RPN_NMS_THRESH
        self._rpn_min_size = cfg.TRAIN.RPN_MIN_SIZE


        
        # Base anchors centered on a single feature-map cell; shifted copies
        # of these cover the whole map in __call__.
        self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
        self._num_anchors = self._anchors.shape[0]
     
    
    def __call__(self,batch_cls_score,batch_input_bbox_pred,batch_im_info):
        """Compute proposals for a whole batch.

        :param batch_cls_score: (B, 2A, H, W) RPN classification scores; the
            first A channels are background, the last A foreground (see the
            slicing below).
        :param batch_input_bbox_pred: (B, 4A, H, W) RPN bbox regression deltas
            -- assumed layout, matching the (1, 4*A, H, W) comment below; TODO confirm.
        :param batch_im_info: (B, 3) rows of (img_height, img_width, scale).
        :return: (blobs, r_scores) -- blobs is (B, post_nms_top_n, 5) with
            columns (batch_index, x1, y1, x2, y2); r_scores holds the
            matching foreground scores (output is padded to a fixed size).
        """
        if isinstance(batch_cls_score,mx.nd.NDArray):
            batch_cls_score = batch_cls_score.asnumpy()
        if isinstance(batch_input_bbox_pred,mx.nd.NDArray):
            batch_input_bbox_pred = batch_input_bbox_pred.asnumpy()
        if isinstance(batch_im_info,mx.nd.NDArray):
            batch_im_info = batch_im_info.asnumpy()
        # NMS kernel bound to GPU device 0 (hard-coded).
        nms = gpu_nms_wrapper(self._threshold, 0)
        blobs = []
        r_scores = []
        # Process one image at a time; slices keep a leading batch axis of 1.
        for n_batch in range(batch_cls_score.shape[0]):
            cls_score,input_bbox_pred,im_info = batch_cls_score[n_batch:n_batch+1],batch_input_bbox_pred[n_batch:n_batch+1],batch_im_info[n_batch:n_batch+1]
            # for each (H, W) location i
            #   generate A anchor boxes centered on cell i
            #   apply predicted bbox deltas at cell i to each of the A anchors
            # clip predicted boxes to image
            # remove predicted boxes with either height or width < threshold
            # sort all (proposal, score) pairs by score from highest to lowest
            # take top pre_nms_topN proposals before NMS
            # apply NMS with threshold 0.7 to remaining proposals
            # take after_nms_topN proposals after NMS
            # return the top proposals (-> RoIs top, scores top)

            pre_nms_topN = self._rpn_pre_nms_top_n
            post_nms_topN = self._rpn_post_nms_top_n
            min_size = self._rpn_min_size

            # the first set of anchors are background probabilities
            # keep the second part
            scores = cls_score[:, self._num_anchors:, :, :]
            bbox_deltas = input_bbox_pred
            im_info = im_info[0, :]


            # 1. Generate proposals from bbox_deltas and shifted anchors
            # use real image size instead of padded feature map sizes
            height, width = int(im_info[0] / self._feat_stride), int(im_info[1] / self._feat_stride)


            # Enumerate all shifts
            shift_x = np.arange(0, width) * self._feat_stride
            shift_y = np.arange(0, height) * self._feat_stride
            shift_x, shift_y = np.meshgrid(shift_x, shift_y)
            # One (dx, dy, dx, dy) row per feature-map cell, applied to both corners.
            shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()

            # Enumerate all shifted anchors:
            #
            # add A anchors (1, A, 4) to
            # cell K shifts (K, 1, 4) to get
            # shift anchors (K, A, 4)
            # reshape to (K*A, 4) shifted anchors
            A = self._num_anchors
            K = shifts.shape[0]
            anchors = self._anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
            anchors = anchors.reshape((K * A, 4))

            # Transpose and reshape predicted bbox transformations to get them
            # into the same order as the anchors:
            #
            # bbox deltas will be (1, 4 * A, H, W) format
            # transpose to (1, H, W, 4 * A)
            # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
            # in slowest to fastest order
            bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
            bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))

            # Same story for the scores:
            #
            # scores are (1, A, H, W) format
            # transpose to (1, H, W, A)
            # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
            scores = self._clip_pad(scores, (height, width))
            scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))

            # Convert anchors into proposals via bbox transformations
            proposals = bbox_pred(anchors, bbox_deltas)

            # 2. clip predicted boxes to image
            proposals = clip_boxes(proposals, im_info[:2])

            # 3. remove predicted boxes with either height or width < threshold
            # (NOTE: convert min_size to input image scale stored in im_info[2])

            keep = self._filter_boxes(proposals, min_size * im_info[2])
            scores = scores[keep]
            proposals = proposals[keep]

            # 4. sort all (proposal, score) pairs by score from highest to lowest
            # 5. take top pre_nms_topN (e.g. 6000)
            order = scores.ravel().argsort()[::-1]
            if pre_nms_topN > 0:
                order = order[:pre_nms_topN]
            proposals = proposals[order, :]
            scores = scores[order]

            # 6. apply nms (e.g. threshold = 0.7)
            # 7. take after_nms_topN (e.g. 300)
            # 8. return the top proposals (-> RoIs top)
            det = np.hstack((proposals, scores)).astype(np.float32)
            assert len(det) > 0
            keep = nms(det)
            if post_nms_topN > 0:
                keep = keep[:post_nms_topN]
            # pad to ensure output size remains unchanged
            # (resamples already-kept indices with replacement, so duplicated
            # proposals can appear in the output)
            if len(keep) < post_nms_topN:
                pad = npr.choice(keep, size=post_nms_topN - len(keep))
                keep = np.hstack((keep, pad))
            proposals = proposals[keep, :]
            scores = scores[keep]

            # Prepend the batch index so downstream RoI ops know the source image.
            batch_inds = np.ones((proposals.shape[0], 1), dtype=np.float32) * n_batch
            blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
            blobs.append(blob[np.newaxis])
            r_scores.append(scores[np.newaxis])
        blobs = np.concatenate(blobs,axis=0)
        r_scores = np.concatenate(r_scores,axis=0)
        return blobs,r_scores
    @staticmethod
    def _filter_boxes(boxes, min_size):
        """ Remove all boxes with any side smaller than min_size """
        # +1 because boxes are inclusive pixel coordinates (x2/y2 are inside the box).
        ws = boxes[:, 2] - boxes[:, 0] + 1
        hs = boxes[:, 3] - boxes[:, 1] + 1
        keep = np.where((ws >= min_size) & (hs >= min_size))[0]
        return keep

    @staticmethod
    def _clip_pad(tensor, pad_shape):
        """
        Clip boxes of the pad area.
        :param tensor: [n, c, H, W]
        :param pad_shape: [h, w]
        :return: [n, c, h, w]
        """
        H, W = tensor.shape[2:]
        h, w = pad_shape

        # Drop rows/cols that only exist because the feature map was padded.
        if h < H or w < W:
            tensor = tensor[:, :, :h, :w].copy()

        return tensor

class Proposal_Target(object):
    """Sample RPN proposals and assign them RCNN training targets.

    Runs the Proposal layer, appends the ground-truth boxes to the candidate
    RoIs for each image, then samples foreground/background RoIs and computes
    per-class bbox regression targets and weights for the detection head.
    """
    def __init__(self, cfg):
        """
        :param cfg: config with TRAIN.BATCH_ROIS, TRAIN.FG_FRACTION,
            dataset.NUM_CLASSES, CLASS_AGNOSTIC, plus everything Proposal needs.
        """
        super(Proposal_Target, self).__init__()
        self._cfg = cfg

        self._batch_rois = cfg.TRAIN.BATCH_ROIS
        #        self._batch_images = cfg.TRAIN.BATCH_IMAGES
        self._fg_fraction = cfg.TRAIN.FG_FRACTION
        self._num_classes = cfg.dataset.NUM_CLASSES
        # BUG FIX: the original read the nonexistent attribute `self.num_classes`
        # (the attribute is `self._num_classes`), raising AttributeError whenever
        # cfg.CLASS_AGNOSTIC was False.
        self._num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else self._num_classes)
        self.proposal = Proposal(cfg)

    def __call__(self, rpn_cls_act_reshape, rpn_bbox_pred, im_info, batch_gt_boxes):
        """Generate sampled RoIs and their targets for a whole batch.

        :param rpn_cls_act_reshape: RPN scores, forwarded to Proposal.
        :param rpn_bbox_pred: RPN bbox deltas, forwarded to Proposal.
        :param im_info: (B, 3) image info, forwarded to Proposal.
        :param batch_gt_boxes: (B, G, 5) ground truth (x1, y1, x2, y2, cls);
            rows with cls <= 0 are treated as padding and dropped.
        :return: (rois, labels, labels_one_hot, bbox_targets, bbox_weights),
            each concatenated over the batch along axis 0.
        """
        batch_all_rois, _ = self.proposal(rpn_cls_act_reshape, rpn_bbox_pred, im_info)
        r_rois = []
        r_labels = []
        r_bbox_targets = []
        r_bbox_weights = []
        for n_batch in range(batch_all_rois.shape[0]):
            all_rois = batch_all_rois[n_batch]
            gt_boxes = batch_gt_boxes[n_batch]
            # Drop padded / invalid ground-truth entries (class id <= 0).
            gt_boxes = gt_boxes[np.where(gt_boxes[:, 4] > 0)[0]]
            # Only the "keep everything" mode is supported/tested here.
            assert self._batch_rois == -1
            if self._batch_rois == -1:
                rois_per_image = all_rois.shape[0] + gt_boxes.shape[0]
                fg_rois_per_image = rois_per_image
            else:
                rois_per_image = self._batch_rois / 1
                fg_rois_per_image = np.round(self._fg_fraction * rois_per_image).astype(int)
            # Include ground-truth boxes in the set of candidate rois;
            # prepend the batch index column so they match the RoI layout.
            batch_col = np.ones((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype) * n_batch
            all_rois = np.vstack((all_rois, np.hstack((batch_col, gt_boxes[:, :-1]))))
            # Sanity check: every RoI must belong to this image.
            assert np.all(all_rois[:, 0] == n_batch), 'Only single item batches are supported'

            rois, labels, bbox_targets, bbox_weights = self.sample_rois(all_rois, fg_rois_per_image, rois_per_image,
                                                                        self._num_reg_classes, self._cfg,
                                                                        gt_boxes=gt_boxes)
            r_rois.append(rois)
            r_labels.append(labels)
            r_bbox_targets.append(bbox_targets)
            r_bbox_weights.append(bbox_weights)
        r_rois = np.concatenate(r_rois, axis=0)
        r_labels = np.concatenate(r_labels, axis=0)
        r_bbox_targets = np.concatenate(r_bbox_targets, axis=0)
        r_bbox_weights = np.concatenate(r_bbox_weights, axis=0)
        # One-hot encode the sampled class labels.
        r_labels_one_hot = np.zeros(shape=(r_labels.shape[0], self._num_classes))
        for nclass in range(self._num_classes):
            r_labels_one_hot[np.where(r_labels[:] == nclass)[0], nclass] = 1
        return r_rois, r_labels, r_labels_one_hot, r_bbox_targets, r_bbox_weights

    def sample_rois(self, rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                    labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):
        """
        generate random sample of ROIs comprising foreground and background examples
        :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
        :param fg_rois_per_image: foreground roi number
        :param rois_per_image: total roi number
        :param num_classes: number of classes
        :param labels: maybe precomputed (must be None in this code path)
        :param overlaps: maybe precomputed (max_overlaps)
        :param bbox_targets: maybe precomputed (must be None in this code path)
        :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
        :return: (rois, labels, bbox_targets, bbox_weights)
        """
        assert labels is None
        if labels is None:
            # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin
            # `float` is the documented replacement (same float64 dtype).
            overlaps = bbox_overlaps(rois[:, 1:].astype(float), gt_boxes[:, :4].astype(float))
            gt_assignment = overlaps.argmax(axis=1)
            overlaps = overlaps.max(axis=1)
            labels = gt_boxes[gt_assignment, 4]

        # foreground RoI with FG_THRESH overlap
        fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
        # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
        fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
        # Sample foreground regions without replacement
        if len(fg_indexes) > fg_rois_per_this_image:
            fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)

        # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
        bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
        # Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
        bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
        bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
        # Sample background regions without replacement
        if len(bg_indexes) > bg_rois_per_this_image:
            bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)

        # indexes selected: foreground first, then background
        keep_indexes = np.append(fg_indexes, bg_indexes)

        # pad more to ensure a fixed minibatch size (duplicates allowed
        # across iterations; within one draw the sample is without replacement)
        while keep_indexes.shape[0] < rois_per_image:
            gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
            gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
            keep_indexes = np.append(keep_indexes, gap_indexes)

        # select labels
        labels = labels[keep_indexes]
        # set labels of bg_rois (everything after the fg prefix) to be 0
        labels[fg_rois_per_this_image:] = 0
        rois = rois[keep_indexes]

        assert bbox_targets is None
        # load or compute bbox_target
        if bbox_targets is not None:
            bbox_target_data = bbox_targets[keep_indexes, :]
        else:
            # Regression targets toward the gt box each kept RoI overlaps most.
            targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
            if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
                targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
                           / np.array(cfg.TRAIN.BBOX_STDS))
            bbox_target_data = np.hstack((labels[:, np.newaxis], targets))

        # Expand (label, tx, ty, tw, th) rows into per-class target/weight arrays.
        bbox_targets, bbox_weights = \
            expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)

        return rois, labels, bbox_targets, bbox_weights





def box_annotator_ohem(cls_score,bbox_pred,labels,bbox_targets,bbox_weights,num_classes, num_reg_classes, roi_per_img):
    """Online Hard Example Mining: rank RoIs by total loss, return the hardest.

    Computes each RoI's classification loss (-log softmax probability of its
    true class) plus its smooth-L1 bbox loss (masked by bbox_weights), and
    returns the indices of the `roi_per_img` RoIs with the largest combined
    loss, in descending order of loss.

    :param cls_score: mx.nd.NDArray (N, num_classes) raw class scores.
    :param bbox_pred: mx.nd.NDArray (N, 4*num_reg_classes) bbox predictions.
    :param labels: mx.nd.NDArray (N,) ground-truth class indices.
    :param bbox_targets: mx.nd.NDArray, same shape as bbox_pred.
    :param bbox_weights: mx.nd.NDArray mask, same shape as bbox_pred.
    :param num_classes: unused here -- kept for interface compatibility.
    :param num_reg_classes: unused here -- kept for interface compatibility.
    :param roi_per_img: number of hard examples to keep.
    :return: 1-D numpy array of the hardest RoI indices (descending loss).

    NOTE(review): the original body contained ~15 lines after an unconditional
    `return index` (building one-hot labels/weights and a second return) that
    could never execute; they also only mutated local numpy copies, so removing
    them preserves behavior. If the multi-value return was the intended
    contract, that dead tail needs to be resurrected deliberately.
    """
    labels = labels.asnumpy()

    # Per-RoI classification loss: -log p(true class); epsilon guards log(0).
    per_roi_loss_cls = mx.nd.SoftmaxActivation(cls_score) + 1e-14
    per_roi_loss_cls = per_roi_loss_cls.asnumpy()
    per_roi_loss_cls = per_roi_loss_cls[np.arange(per_roi_loss_cls.shape[0], dtype='int'), labels.astype('int')]
    per_roi_loss_cls = -1 * np.log(per_roi_loss_cls)
    per_roi_loss_cls = np.reshape(per_roi_loss_cls, newshape=(-1,))

    # Per-RoI bbox loss: smooth L1 masked by bbox_weights, summed over coords.
    per_roi_loss_bbox = bbox_weights * mx.nd.smooth_l1((bbox_pred - bbox_targets), scalar=1.0)
    per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()

    # argsort ascending, then reverse -> indices ordered by descending total loss.
    order_desc = np.argsort(per_roi_loss_cls + per_roi_loss_bbox)[::-1]
    return order_desc[:roi_per_img]