'''
Author: SlytherinGe
LastEditTime: 2021-07-04 22:07:49
'''
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from mmcv.runner.hooks import HOOKS, Hook
from mmcv import get_logger
import torch
import torch.nn as nn

import matplotlib.pyplot as plt
import numpy as np

from multiprocessing import Queue

# Module-level queue used to pass (current_epoch, total_epochs) from
# UpdataTrainingEpochHook.before_epoch to FocusFCOS._get_activate_level.
# NOTE(review): created at import time; presumably hook and detector live in
# the same process so the queue is shared — confirm under distributed training.
q = Queue()

def test_img_and_mask(img, mask, im_meta):
    """Visual debugging helper: show an image beside its masked version.

    Renders ``img[0]`` (first channel of the array-converted input) and
    ``mask * img[0]`` side by side in grayscale; the window title carries
    the image filename from ``im_meta``. Blocks on ``plt.show()``.
    """
    arr = np.array(img)
    channel = arr[0]
    plt.figure()
    for pos, view in enumerate((channel, mask * channel), start=1):
        plt.subplot(1, 2, pos)
        plt.imshow(view, cmap='gray')
    # Title is set after the second subplot, so it attaches to that axes,
    # matching the original layout.
    plt.title(im_meta['filename'])
    plt.show()

@HOOKS.register_module()
class UpdataTrainingEpochHook(Hook):
    """Publish training progress to the module-level queue ``q``.

    At the start of every epoch, pushes ``(runner.epoch, runner.max_epochs)``
    onto ``q`` so the detector can read training progress from inside its
    forward pass.

    NOTE(review): the "Updata" spelling is kept because configs reference
    this class through the registry by its exact name.
    """

    def before_epoch(self, runner):
        progress = (runner.epoch, runner.max_epochs)
        q.put(progress)


@DETECTORS.register_module()
class FocusFCOS(BaseDetector):
    """FCOS-style single-stage detector with a feature-focusing "paoi" head.

    The paoi head predicts per-level attention logits from the FPN
    features. ``fuse_function`` binarizes each map at the current
    activation threshold (``self._activate_level``) and multiplies it into
    the matching feature level, so the bbox head effectively only sees
    activated regions. The threshold is ramped during the first half of
    training using epoch progress that ``UpdataTrainingEpochHook`` pushes
    through the module-level queue ``q``.
    """

    def __init__(self,
                 backbone,
                 paoi_head,
                 bbox_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None):
        """Build backbone/neck/paoi head/bbox head from config dicts.

        Args:
            backbone (dict): backbone config.
            paoi_head (dict | None): attention-head config.
            bbox_head (dict | None): bbox-head config; ``train_cfg`` and
                ``test_cfg`` are injected into it before building.
            train_cfg, test_cfg: training / testing configs.
            neck (dict, optional): neck (e.g. FPN) config.
            pretrained (str, optional): path to pretrained backbone weights.
        """
        super(FocusFCOS, self).__init__()
        self.backbone = build_backbone(backbone)

        if neck is not None:
            self.neck = build_neck(neck)

        if paoi_head is not None:
            self.paoi_head = build_head(paoi_head)

        if bbox_head is not None:
            # bbox_head is a config dict here; merge the cfgs, then build.
            self.bbox_head = bbox_head
            self.bbox_head.update(train_cfg=train_cfg)
            self.bbox_head.update(test_cfg=test_cfg)
            self.bbox_head = build_head(self.bbox_head)


        # Final activation threshold and the exponent of the warm-up curve
        # used by _get_activate_level.
        self.ACTIVATE_LEVEL = 0.4
        self.GAMMA = 6
        # NOTE(review): these two name-mangled counters are never read or
        # updated anywhere in this file — epoch progress actually arrives
        # via the queue in _get_activate_level. Likely dead state.
        self.__total_epoch = 1
        self.__current_epoch = 1
        self._activate_level = self.ACTIVATE_LEVEL


        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights(pretrained=pretrained)

        self.logger = get_logger('mmdet')

    def _get_activate_level(self, base_lvl=0.4, gamma=6):
        """Refresh ``self._activate_level`` from the latest epoch progress.

        Pops one ``(current_epoch, total_epochs)`` tuple from the global
        queue (if available) and, during the first half of training, sets
        the threshold to ``((2 * epoch / total) ** gamma) * base_lvl`` — a
        polynomial ramp from ~0 up to ``base_lvl``. After the halfway
        point the threshold simply keeps its last value.
        """
        if not q.empty():
            current_epoch, total_epochs = q.get()
            self.logger.info("current_epoch, total_epochs:{}, {}".format(current_epoch, total_epochs))
            if current_epoch <= total_epochs / 2:
                self._activate_level = ((current_epoch * 2 / total_epochs) ** gamma) * base_lvl
                self.logger.info('activate level:{}'.format(self._activate_level))


    def fuse_function(self, paoi_map, feat):
        """Gate ``feat`` with a binarized version of ``paoi_map``.

        Returns:
            tuple: ``(paoi_map,)`` — a 1-tuple of the raw logits — when
            there is no bbox head; otherwise ``(gated_feat, activate)``
            where ``activate`` is 1.0 where ``sigmoid(paoi_map)`` reaches
            the current threshold and -1e-3 elsewhere (a small negative
            value instead of 0, presumably to keep a faint inverted signal
            in suppressed regions — TODO confirm intent).
        """
        if not self.with_bbox_head:
            return paoi_map,
        else:
            activate = torch.zeros_like(paoi_map)
            paoi_map = torch.sigmoid(paoi_map)
            activate[paoi_map >= self._activate_level] = 1.0
            activate[paoi_map < self._activate_level] = -1e-3
            # Detach so no gradient flows through the hard threshold mask.
            activate = activate.detach()
            return feat * activate, activate

    @property
    def with_paoi(self):
        """bool: whether the detector has a paoi (attention) head"""
        return hasattr(self, 'paoi_head') and self.paoi_head is not None

    @property
    def with_bbox_head(self):
        """bool: whether the detector has a bbox head"""
        return hasattr(self, 'bbox_head') and self.bbox_head is not None

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(FocusFCOS, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_paoi:
            self.paoi_head.init_weights()
        if self.with_bbox_head:
            self.bbox_head.init_weights()

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/analysis_tools/get_flops.py`
        """
        outs = ()
        # backbone (+neck)
        x = self.extract_feat(img)
        # paoi head only; the bbox head is not exercised here
        if self.with_paoi:
            paoi_outs = self.paoi_head(x)
            outs = outs + (paoi_outs, )

        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

            gt_labels (list[Tensor]): class indices corresponding to each box

            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

            proposals : override paoi proposals with custom proposals. Use when
                `with_paoi` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # test_img_and_mask(img[0].cpu(), gt_masks[0], img_metas[0])

        x = self.extract_feat(img)

        losses = dict()

        # Per-level binary activation maps, forwarded to the bbox head.
        paois = []

        # paoi head forward and loss; proposal_list holds the per-level
        # attention logits returned alongside the losses.
        if self.with_paoi:
            paoi_losses, proposal_list = self.paoi_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_masks,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=1)
            losses.update(paoi_losses)
            tmp = []
            ind = 0
            # Pull the latest epoch progress and update the threshold ramp.
            self._get_activate_level(self.ACTIVATE_LEVEL, self.GAMMA)
            for k, attention in enumerate(proposal_list):
                # Gate each FPN level by its binarized attention map.
                fused_feat, activate_map = self.fuse_function(attention, x[k])
                tmp.append(fused_feat)
                paois.append(activate_map)
                # tmp.append(x[k] * attention * 2)
                ind = k
            # Keep any deeper FPN levels that received no attention map.
            x = tuple(tmp) + x[ind+1:]

        if self.with_bbox_head:
            bbox_losses = self.bbox_head.forward_train(x, paois, img_metas, gt_bboxes,
                                                gt_labels, gt_bboxes_ignore)
            losses.update(bbox_losses)

        return losses


    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation.

        Returns per-class bbox results when a bbox head is present,
        otherwise the (attention-gated) feature tuple itself.
        """
        # assert self.with_bbox, 'Bbox head must be implemented.'

        x = self.extract_feat(img)

        # Per-level binary activation maps passed to bbox_head.get_bboxes.
        paois = []

        # get origin input shape to onnx dynamic input shape
        if torch.onnx.is_in_onnx_export():
            img_shape = torch._shape_as_tensor(img)[2:]
            img_metas[0]['img_shape_for_onnx'] = img_shape

        if self.with_paoi:
            proposal_list = self.paoi_head.simple_test_rpn(x, img_metas)
            tmp = []
            ind = 0
            for k, attention in enumerate(proposal_list):
                # fuse_function returns a 2-tuple with a bbox head and a
                # 1-tuple (raw logits) without one — unpack accordingly.
                results = self.fuse_function(attention, x[k])
                if self.with_bbox_head:
                    fused_feat, activate_map = results
                    paois.append(activate_map)
                else:
                    fused_feat = results[0]
                tmp.append(fused_feat)
                ind = k
            # Keep any deeper FPN levels that received no attention map.
            x = tuple(tmp) + x[ind+1:]
        else:
            paois = None

        if self.with_bbox_head:
            outs = self.bbox_head(x)
            # get origin input shape to support onnx dynamic shape
            if torch.onnx.is_in_onnx_export():
                # get shape as tensor
                img_shape = torch._shape_as_tensor(img)[2:]
                img_metas[0]['img_shape_for_onnx'] = img_shape
            # Append the activation maps as the last positional argument
            # expected by this bbox head's get_bboxes.
            outs += paois,
            bbox_list = self.bbox_head.get_bboxes(
                *outs, img_metas, rescale=rescale)
            # skip post-processing when exporting to ONNX
            if torch.onnx.is_in_onnx_export():
                return bbox_list

            bbox_results = [
                bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
                for det_bboxes, det_labels in bbox_list
            ]
            return bbox_results
        else:
            return x

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        raise NotImplementedError