# '''
# Author: SlytherinGe
# LastEditTime: 2021-06-27 19:45:25
# '''
# NOTE(review): this entire file is commented-out dead code (a disabled
# PAOIHead implementation). Either restore it or delete the file — keeping a
# fully commented-out module in the tree hides intent from maintainers.
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# import cv2
# import numpy as np
# from mmcv.cnn import Scale, normal_init,ConvModule, bias_init_with_prob, xavier_init
# from mmcv.runner import force_fp32
# import mmcv
# from abc import ABCMeta, abstractmethod
# import matplotlib.pyplot as plt

# from mmdet.core import bbox_rescale, multi_apply
# if __name__ == '__main__':
#     from mmdet.models.builder import HEADS, build_loss
#     # from mmdet.models.dense_heads.base_dense_head import BaseDenseHead
#     from mmdet.models.dense_heads.rpn_test_mixin import RPNTestMixin
# else:
#     from ..builder import HEADS, build_loss
#     # from .base_dense_head import BaseDenseHead
#     from .rpn_test_mixin import RPNTestMixin

# INF = 1e8

# @HEADS.register_module()
# class PAOIHead(RPNTestMixin, nn.Module, metaclass=ABCMeta):
#     def __init__(self,
#                  in_channels,
#                  feat_channels=256,
#                  stacked_convs=4,
#                  strides=(8, 16, 32, 64, 128),
#                  dcn_on_last_conv=False,
#                  conv_bias='auto',
#                 #  loss_attention=dict(
#                 #      type='SmoothL1Loss',
#                         #      loss_weight=1.0),
#                 loss_attention=dict(
#                     type='CrossEntropyLoss',
#                     use_sigmoid=True,
#                     loss_weight=1.0),
#                  conv_cfg=None,
#                  norm_cfg=dict(type='BN', requires_grad=True),
#                  train_cfg=None,
#                  test_cfg=None,
#                 #  TODO: move some of these hyperparameters into train_cfg
#                  sigma=30,
#                  attention_stages=3,
#                  pos_thr = 0.1,
#                  neg_thr = 1e-6,
#                  target_ratio = 0.025,
#                  _USE_MASK=False,
#                  _detach=False,
#                  _paoi_sampler='center'):

#         super(PAOIHead, self).__init__()
#         self.in_channels = in_channels
#         self.feat_channels = feat_channels
#         self.stacked_convs = stacked_convs
#         self.strides = strides
#         self.dcn_on_last_conv = dcn_on_last_conv
#         assert conv_bias == 'auto' or isinstance(conv_bias, bool)
#         self.conv_bias = conv_bias        
#         self.loss_attention = build_loss(loss_attention)
#         self.train_cfg = train_cfg
#         self.test_cfg = test_cfg
#         self.conv_cfg = conv_cfg
#         self.norm_cfg = norm_cfg
#         # sigma for the Gaussian blur used when generating attention targets
#         self.sigma = sigma 
#         self.attention_stages = attention_stages
#         self.pos_thr = pos_thr
#         self.neg_thr = neg_thr
#         self.target_ratio = target_ratio
#         self.fp16_enabled = False
#         self._USE_MASK = _USE_MASK
#         self._detach = _detach
#         self._paoi_sampler = _paoi_sampler

#         self.__init_convs()
#         self.__init_predictor()
#         self._func_paoi_sampler = self.__init_paoi_sampler(self._paoi_sampler)

#         assert self.attention_stages == len(self.strides)

#     def __paoi_center_sampler(self, paoi_map, points):
#         return paoi_map[points[:,1], points[:,0]]

#     def __paoi_average_sampler(self, paoi_map, points):
#         pooled_paois = []
#         for stride in self.strides:
#             pooled_paoi = torch.nn.functional.avg_pool2d(paoi_map[None], kernel_size=stride)
#             pooled_paois.append(pooled_paoi.reshape(-1))
#         return  torch.cat(pooled_paois)

#     def __paoi_max_sampler(self, paoi_map, points):
#         pooled_paois = []
#         for stride in self.strides:
#             pooled_paoi = torch.max_pool2d(paoi_map[None], kernel_size=stride)
#             pooled_paois.append(pooled_paoi.reshape(-1))
#         return  torch.cat(pooled_paois)

#     def __init_paoi_sampler(self, method='center'):
#         assert method in ('center', 'average', 'max', 'middle')

#         dict_sample_functions = dict(
#             center=self.__paoi_center_sampler,
#             max=self.__paoi_max_sampler,
#             average=self.__paoi_average_sampler
#         )

#         return dict_sample_functions[method]

#     def __init_convs(self):
#         # TODO: analyze whether using dilated convs can boost the performance
#         self.attention_convs = nn.ModuleList()
#         for i in range(self.stacked_convs):
#             chn = self.in_channels if i == 0 else self.feat_channels
#             if self.dcn_on_last_conv and i == self.stacked_convs - 1:
#                 conv_cfg = dict(type='DCNv2')
#             else:
#                 conv_cfg = self.conv_cfg
#             self.attention_convs.append(
#                 ConvModule(
#                     chn,
#                     self.feat_channels,
#                     3,
#                     stride=1,
#                     padding=1,
#                     dilation=1,
#                     bias=self.conv_bias,
#                     conv_cfg=conv_cfg,
#                     norm_cfg=self.norm_cfg
#                 )
#             )
        
#     def __init_predictor(self):
#         # self.attention_pred = nn.Sequential(
#         #     nn.Conv2d(self.feat_channels,
#         #                                 1,
#         #                                 kernel_size=3,
#         #                                 stride=1,
#         #                                 padding=1),
#         #     nn.Sigmoid()
#         # )
#         self.attention_pred = nn.Conv2d(self.feat_channels,
#                                         1,
#                                         kernel_size=3,
#                                         stride=1,
#                                         padding=1)

#     def init_weights(self):
        
#         for m in self.attention_convs:
#             if isinstance(m.conv, nn.Conv2d):
#                 xavier_init(m.conv, distribution='uniform')
#         xavier_init(self.attention_pred, distribution='uniform')

#     def forward(self, feats):
#         """Forward features from the upstream network.

#         Args:
#             feats (tuple[Tensor]): Features from the upstream network, each is
#                 a 4D-tensor.

#         Returns:
#             tuple: A single-element tuple whose item is the list of attention
#                 maps, one per scale level; each map is a 4D tensor with a
#                 channel number of 1.
#         """  
#         # mmcv.dump(feats,'/media/gejunyao/Disk/Gejunyao/exp_results/visualization/middle_part/paoi_feats.pkl')   
#         return multi_apply(self.forward_single, feats[:self.attention_stages])
#         # return feats[:self.attention_stages],

#     def forward_single(self, x):
#         """Forward features of a single scale levle.

#         Args:
#             x (Tensor): FPN feature maps of the specified stride.

#         Returns:
#             tuple: A single-element tuple containing the predicted attention
#                 map for this scale level (4D tensor, 1 channel).
#         """
#         feat = x if self._detach == False else x.detach()

#         for attention_layers in self.attention_convs:
#             feat = attention_layers(feat)
#         attention_map = self.attention_pred(feat) 
#         return (attention_map,)

#     @force_fp32(apply_to=('attention_map',))
#     def loss(self,
#              attention_map,
#              gt_bboxes,
#              gt_masks,
#              img_metas,
#              gt_bboxes_ignore=None):
#         """Compute loss of the head.

#         Args:
#             attention_map (list[Tensor]): Attention map for each scale level,
#                 each is a 4D-tensor, the channel number is 1.
#             gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
#                 shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
#             gt_masks (list[Tensor]): Ground truth masks for each image; used
#                 as the attention target when _USE_MASK is set.
#             img_metas (list[dict]): Meta information of each image, e.g.,
#                 image size, scaling factor, etc.
#             gt_bboxes_ignore (None | list[Tensor]): specify which bounding
#                 boxes can be ignored when computing the loss.

#         Returns:
#             dict[str, Tensor]: A dictionary of loss components.
#         """
#         featmap_sizes = [featmap.size()[-2:] for featmap in attention_map]
#         # map each location on the attention maps of the different levels to
#         # its corresponding location in the original image.
#         all_level_points = self.get_points(featmap_sizes, attention_map[0].device)
#         # generate a heat map for each image from its gt bboxes, and build the
#         # relationship between the feature maps and the heat map
#         attention_targets = self.get_targets(all_level_points, gt_bboxes, gt_masks, img_metas).reshape(-1)
#         att_feat_map = []
#         for m in attention_map:
#             att_map = m
#             att_feat_map.append(att_map.permute(0,2,3,1).reshape(att_map.shape[0],-1))
#         att_feat_map = torch.cat(att_feat_map, dim=1).reshape(-1)
#         # sample targets
#         '''
#         按比例采样
#         '''
#         # num_points = attention_targets.shape[0]
#         # _, indices = torch.sort(attention_targets)
#         # num_sample = int(num_points * self.target_ratio)
#         # sample_indices = torch.cat((indices[:int(num_sample*0.1)], indices[-num_sample:]))
#         # att_feat_map = att_feat_map[sample_indices]
#         # attention_targets = attention_targets[sample_indices]
#         # num_points = attention_targets.shape[0]
#         '''
#         按阈值采样
#         '''
#         # pos_inds = torch.nonzero(attention_targets > self.pos_thr)
#         # num_pos = pos_inds.shape[0]
#         # neg_inds = torch.nonzero(attention_targets < self.neg_thr)
#         # num_neg = neg_inds.shape[0]
#         # if num_pos < num_neg:
#         #     neg_sampler = torch.randperm(num_neg)[:int(num_pos*0.1)]
#         #     neg_inds = neg_inds[neg_sampler]

#         # target_inds = torch.cat((pos_inds, neg_inds))
#         # num_points = target_inds.shape[0]
#         # att_feat_map = att_feat_map[target_inds]
#         # attention_targets = attention_targets[target_inds]

#         # loss_attention = self.loss_attention(att_feat_map,
#         #                                      attention_targets, avg_factor=num_points)
#         '''
#         手动采样loss最大的target_ratio目标
#         '''
#         # all_point_loss = self.loss_attention(att_feat_map,
#         #                                      attention_targets,
#         #                                      reduction_override='none').reshape(-1)
#         # num_points = int(all_point_loss.shape[0] * self.target_ratio)

#         # topk_losses,_ = torch.topk(all_point_loss, num_points)
#         # loss_attention = torch.sum(topk_losses) / torch.tensor([num_points], dtype=all_point_loss.dtype, device=all_point_loss.device)

#         '''
#         无采样
#         '''
#         num_points = len((attention_targets > 0.99999).nonzero(as_tuple=False).reshape(-1))
#         # num_points = attention_targets.shape[0]
#         loss_attention = self.loss_attention(torch.sigmoid(att_feat_map),
#                                         attention_targets, avg_factor=num_points)
#         return dict(loss_attention=loss_attention)
        

#     def get_targets(self, points, gt_bboxes_list, gt_masks_list, img_metas):
#         """Compute attention targets for points
#         in multiple images.

#         Args:
#             points (list[Tensor]): Points of each fpn level, each has shape
#                 (num_points, 2).
#             gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
#                 each has shape (num_gt, 4).
#             gt_masks_list (list[Tensor]): Ground truth masks of each image;
#                 used as the attention target when _USE_MASK is set.

#         Returns:
#             Tensor: Attention targets for all images, concatenated over
#                 images into a single flat tensor.
#         """  
#         # points from a single image; in HRNet, all input images are the same size.
#         concat_points = torch.cat(points, dim=0)

#         attention_regress_target_list, = multi_apply(
#             self._get_target_single,
#             gt_bboxes_list,
#             gt_masks_list,
#             img_metas,
#             points=concat_points
#         )
#         concat_targets = torch.cat(attention_regress_target_list, dim=0)
#         return concat_targets
    
#     def _get_target_single(self, gt_bboxes, gt_masks, img_metas, points):

#         if self._USE_MASK:
#             heat_tensor = torch.tensor(gt_masks,dtype=torch.float32, device=points.device)
#         else:
#             img_shape = img_metas['img_shape']
#             gt_bboxes = gt_bboxes.type(torch.LongTensor)
#             background = np.zeros((gt_bboxes.shape[0], img_shape[0], img_shape[1]))
#             for i in range(gt_bboxes.shape[0]):
#                 background[i,gt_bboxes[i,1]:gt_bboxes[i,3],gt_bboxes[i,0]:gt_bboxes[i,2]] = 1.0
#                 background[i] = cv2.GaussianBlur(background[i],(0,0),self.sigma)
#                 background[i] /= np.max(background[i])
#             heat_map = np.sum(background,axis=0)
#             heat_map[heat_map>self.pos_thr] = 1.0
#             heat_tensor = torch.tensor(heat_map,dtype=torch.float32, device=points.device)
            
#         # expand its dims to equal the points
#         # regress_target = heat_tensor[points[:,1], points[:,0]]
#         regress_target = self._func_paoi_sampler(heat_tensor, points)

#         return regress_target[None],
        

#     def _get_points_single(self,
#                            featmap_size,
#                            stride,
#                            device,
#                            flatten=False):
#         """Get points of a single scale level."""
#         h, w = featmap_size
#         x_range = torch.arange(w, dtype=torch.long, device=device)
#         y_range = torch.arange(h, dtype=torch.long, device=device)
#         y, x = torch.meshgrid(y_range, x_range)
#         if flatten:
#             y = y.flatten()
#             x = x.flatten()
#         points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
#                              dim=-1) + stride // 2
#         point_stride = torch.tensor(stride, device=device).expand_as(points[:,0]).reshape(-1,1)
#         points = torch.cat((points, point_stride), dim=-1)
#         return points

#     def get_points(self, featmap_sizes, device, flatten=False):
#         """Get points according to feature map sizes.

#         Args:
#             featmap_sizes (list[tuple]): Multi-level feature map sizes.
#             dtype (torch.dtype): Type of points.
#             device (torch.device): Device of points.

#         Returns:
#             tuple: points of each image.
#         """
#         mlvl_points = []
#         for i in range(len(featmap_sizes)):
#             mlvl_points.append(
#                 self._get_points_single(featmap_sizes[i], self.strides[i],
#                                         device, flatten))
#         return mlvl_points

#     @force_fp32(apply_to=('attention_map',))
#     def get_bboxes(self,
#                    attention_map,
#                    img_metas,
#                    cfg=None,
#                    rescale=None): 
#         # output the results from paoi
#         # mmcv.dump(attention_map,'/media/gejunyao/Disk/Gejunyao/exp_results/visualization/middle_part/paoi_results.pkl')

#         return attention_map
        

#     def forward_train(self,
#                         x,
#                         img_metas,
#                         gt_bboxes,
#                         gt_masks,
#                         gt_labels=None,
#                         gt_bboxes_ignore=None,
#                         proposal_cfg=None,
#                         **kwargs):
#         """
#         Args:
#             x (list[Tensor]): Features from FPN.
#             img_metas (list[dict]): Meta information of each image, e.g.,
#                 image size, scaling factor, etc.
#             gt_bboxes (Tensor): Ground truth bboxes of the image,
#                 shape (num_gts, 4).
#             gt_masks (Tensor): Ground truth masks of the image; used as the
#                 attention target when _USE_MASK is set.
#             gt_labels (Tensor): Ground truth labels of each box,
#                 shape (num_gts,).
#             gt_bboxes_ignore (Tensor): Ground truth bboxes to be
#                 ignored, shape (num_ignored_gts, 4).
#             proposal_cfg (mmcv.Config): Test / postprocessing configuration,
#                 if None, test_cfg would be used

#         Returns:
#             tuple:
#                 losses: (dict[str, Tensor]): A dictionary of loss components.
#                 proposal_list (list[Tensor]): Proposals of each image.
#         """

#         outs = self(x)
#         if gt_labels is None:
#             loss_inputs = outs + (gt_bboxes, gt_masks, img_metas)
#         else:
#             loss_inputs = outs + (gt_bboxes, gt_masks, gt_labels, img_metas)
#         losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
#         if proposal_cfg is None:
#             return losses
#         else:
#             proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
#             return losses, proposal_list
