import torch
import numpy as np

import torch.nn as nn
from torch.nn import functional as F

# Architecture
from models.nets.backbones.resnet import resnet50
from models.nets.heads.rpn import RegionProposalNetwork
from models.nets.heads.resnet50roihead import Resnet50RoIHead

# Loss
from models.detectors.anchors import AnchorTargetCreator, ProposalTargetCreator

# Decoder
from torchvision.ops import nms
from utils.bbox import loc2bbox

class FasterRCNN(nn.Module):
    """Two-stage Faster R-CNN detector: backbone -> RPN head -> RoI head.

    The forward pass is mode-driven so a training loop can run each stage
    separately ("backbone", "rpn_head", "roi_head") or the whole pipeline
    at once ("inference").
    """

    def __init__(self, args):
        super(FasterRCNN, self).__init__()

        # ---- Backbone ---------------------------------------------------
        # resnet50() returns the feature extractor plus the tail layers
        # that the RoI head reuses as its classifier.
        # Feature map is expected to be [1024, 38, 38] — TODO confirm
        # against the resnet50 implementation.
        if args.backbone != "resnet50":
            raise ValueError("backbone must be resnet50")
        self.backbone, self.classifier = resnet50(args.pretrained)

        # ---- Neck (not implemented yet) ---------------------------------
        # TODO: optional FPN neck, e.g.:
        # if args.neck == "fpn":
        #     self.neck = FPN()
        # else:
        #     raise ValueError("Neck must be fpn")

        # ---- RPN head ---------------------------------------------------
        if args.rpn_head != "RPN":
            raise ValueError("rpn_head must be rpn")
        self.rpn_head = RegionProposalNetwork(
            1024, 512,
            ratios=args.anchor_ratios,
            anchor_scales=args.anchor_scales,
            feat_stride=args.feat_stride,
            mode=args.mode,
        )

        # ---- RoI head ---------------------------------------------------
        if args.roi_head != "Resnet50RoIHead":
            raise ValueError("roi_head must be resnet50roihead")
        self.roi_head = Resnet50RoIHead(
            n_class=args.num_classes + 1,
            roi_size=14,
            spatial_scale=1,
            classifier=self.classifier,
        )

    def forward(self, x, mode = "inference", scale=1.):
        """Run the stage selected by ``mode``.

        mode == "backbone":  x is an image batch; returns base features.
        mode == "rpn_head":  x is (base_feature, img_size); returns the
                             RPN 5-tuple (locs, scores, rois, indices, anchor).
        mode == "roi_head":  x is (base_feature, rois, roi_indices, img_size);
                             returns (roi_cls_locs, roi_scores).
        mode == "inference": x is an image batch; runs the full pipeline.

        Raises ValueError for any other mode.
        """
        if mode == "backbone":
            return self.backbone.forward(x)

        if mode == "rpn_head":
            base_feature, img_size = x
            return self.rpn_head.forward(base_feature, img_size, scale)

        if mode == "roi_head":
            base_feature, rois, roi_indices, img_size = x
            return self.roi_head.forward(base_feature, rois, roi_indices, img_size)

        if mode == "inference":
            img_size = x.shape[2:]
            base_feature = self.backbone.forward(x)
            # Only the proposals and their batch indices are needed here.
            _, _, rois, roi_indices, _ = self.rpn_head.forward(base_feature, img_size, scale)
            roi_cls_locs, roi_scores = self.roi_head.forward(base_feature, rois, roi_indices, img_size)
            return roi_cls_locs, roi_scores, rois, roi_indices

        raise ValueError("Unrecognized Model Processing Mode")

    def freeze_bn(self):
        """Switch every BatchNorm2d layer to eval mode so its running
        statistics are not updated during fine-tuning."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()

class FasterRCNNLoss(nn.Module):
    """Training losses for Faster R-CNN.

    ``rpn_loss`` matches anchors against ground truth and scores the RPN,
    additionally sampling the RoIs/targets the second stage trains on;
    ``roi_loss`` scores the classifier head on those sampled RoIs. Both
    use smooth-L1 for regression and cross-entropy for classification.
    """

    def __init__(self, args):
        super(FasterRCNNLoss, self).__init__()

        # Target builders: assign GT boxes/labels to anchors (RPN stage)
        # and to sampled proposals (RoI stage).
        self.anchor_target_creator      = AnchorTargetCreator()
        self.proposal_target_creator    = ProposalTargetCreator()
        # Smooth-L1 transition points for the two regression losses.
        self.rpn_sigma      = 1
        self.roi_sigma      = 1
        # Std used to normalize proposal regression targets.
        self.loc_normalize_std          = [0.1, 0.1, 0.2, 0.2]

    def forward(self, output, imgs, bboxes, labels, mode = 'rpn_head'):
        """Dispatch to the stage-specific loss selected by ``mode``.

        Raises ValueError for any mode other than 'rpn_head'/'roi_head'.
        """
        if mode == 'rpn_head':
            return self.rpn_loss(output, imgs, bboxes, labels)
        elif mode == 'roi_head':
            return self.roi_loss(output, imgs, bboxes, labels)
        else:
            raise ValueError("Unrecognized Loss Mode")

    def rpn_loss(self, output, imgs, bboxes, labels):
        """Compute RPN losses and sample the RoIs for the second stage.

        output: the 5-tuple produced by the RPN head
                (rpn_locs, rpn_scores, rois, roi_indices, anchor).
        Returns (rpn_loc_loss_all, rpn_cls_loss_all, sample_rois,
        sample_indexes, gt_roi_locs, gt_roi_labels); the last four feed
        the RoI head / roi_loss. Losses are summed over the batch.
        """
        rpn_locs, rpn_scores, rois, roi_indices, anchor = output
        bs = imgs.shape[0]

        rpn_loc_loss_all, rpn_cls_loss_all = 0, 0
        sample_rois, sample_indexes, gt_roi_locs, gt_roi_labels = [], [], [], []

        for i in range(bs):
            bbox        = bboxes[i]
            label       = labels[i]
            rpn_loc     = rpn_locs[i]
            rpn_score   = rpn_scores[i]
            roi         = rois[i]
            # -------------------------------------------------- #
            #   Label every anchor against the GT boxes:
            #   gt_rpn_loc      [num_anchors, 4]
            #   gt_rpn_label    [num_anchors, ]
            #   (-1 labels are ignored by the classification loss)
            # -------------------------------------------------- #
            gt_rpn_loc, gt_rpn_label    = self.anchor_target_creator(bbox, anchor[0].cpu().numpy())
            gt_rpn_loc                  = torch.Tensor(gt_rpn_loc).type_as(rpn_locs)
            gt_rpn_label                = torch.Tensor(gt_rpn_label).type_as(rpn_locs).long()
            # -------------------------------------------------- #
            #   RPN regression + classification losses.
            # -------------------------------------------------- #
            rpn_loc_loss = self.fast_rcnn_loc_loss(rpn_loc, gt_rpn_loc, gt_rpn_label, self.rpn_sigma)
            rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label, ignore_index=-1)

            rpn_loc_loss_all += rpn_loc_loss
            rpn_cls_loss_all += rpn_cls_loss
            # ------------------------------------------------------ #
            #   Sample proposals and build the classifier targets:
            #   sample_roi      per-image sampled RoIs
            #   gt_roi_loc      [n_sample, 4]
            #   gt_roi_label    [n_sample, ]
            # ------------------------------------------------------ #
            sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(roi, bbox, label, self.loc_normalize_std)
            sample_rois.append(torch.Tensor(sample_roi).type_as(rpn_locs))
            # Every sampled RoI of image i carries that image's batch index.
            sample_indexes.append(torch.ones(len(sample_roi)).type_as(rpn_locs) * roi_indices[i][0])
            gt_roi_locs.append(torch.Tensor(gt_roi_loc).type_as(rpn_locs))
            gt_roi_labels.append(torch.Tensor(gt_roi_label).type_as(rpn_locs).long())

        sample_rois     = torch.stack(sample_rois, dim=0)
        sample_indexes  = torch.stack(sample_indexes, dim=0)

        return rpn_loc_loss_all, rpn_cls_loss_all, sample_rois, sample_indexes, gt_roi_locs, gt_roi_labels

    def roi_loss(self, output, imgs, bboxes, labels):
        """Compute the classifier head losses over the batch.

        output: (roi_cls_locs, roi_scores, gt_roi_locs, gt_roi_labels)
        Returns (roi_loc_loss_all, roi_cls_loss_all) summed over images.
        """
        roi_cls_locs, roi_scores, gt_roi_locs, gt_roi_labels = output
        bs = imgs.shape[0]
        # Number of sampled RoIs per image; loop-invariant, so hoisted.
        n_sample = roi_cls_locs.size()[1]

        roi_loc_loss_all, roi_cls_loss_all = 0, 0

        for i in range(bs):
            roi_cls_loc     = roi_cls_locs[i]
            roi_score       = roi_scores[i]
            # Per-image targets produced by the proposal target creator.
            gt_roi_loc      = gt_roi_locs[i]
            gt_roi_label    = gt_roi_labels[i]

            # ------------------------------------------------------ #
            #   Select each RoI's regression output for its GT class.
            # ------------------------------------------------------ #
            roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
            roi_loc     = roi_cls_loc[torch.arange(0, n_sample), gt_roi_label]

            # -------------------------------------------------- #
            #   Classifier regression + classification losses.
            #   F.cross_entropy used for consistency with rpn_loss
            #   (same default mean reduction as nn.CrossEntropyLoss).
            # -------------------------------------------------- #
            roi_loc_loss = self.fast_rcnn_loc_loss(roi_loc, gt_roi_loc, gt_roi_label, self.roi_sigma)
            roi_cls_loss = F.cross_entropy(roi_score, gt_roi_label)

            roi_loc_loss_all += roi_loc_loss
            roi_cls_loss_all += roi_cls_loss

        return roi_loc_loss_all, roi_cls_loss_all

    def fast_rcnn_loc_loss(self, pred_loc, gt_loc, gt_label, sigma):
        """Smooth-L1 regression loss over positive samples only.

        Differences with |diff| < 1/sigma^2 take the quadratic branch,
        the rest the linear branch; the sum is divided by the number of
        positives (clamped to at least 1 to avoid division by zero).
        """
        # Only positive (foreground) samples contribute to the loss.
        pred_loc    = pred_loc[gt_label > 0]
        gt_loc      = gt_loc[gt_label > 0]

        sigma_squared = sigma ** 2
        regression_diff = (gt_loc - pred_loc)
        regression_diff = regression_diff.abs().float()
        regression_loss = torch.where(
                regression_diff < (1. / sigma_squared),
                0.5 * sigma_squared * regression_diff ** 2,
                regression_diff - 0.5 / sigma_squared
            )
        regression_loss = regression_loss.sum()
        num_pos         = (gt_label > 0).sum().float()

        regression_loss /= torch.max(num_pos, torch.ones_like(num_pos))
        return regression_loss

class FasterRCNNBboxDecoder():
    """Turns raw RoI-head outputs into per-image detection arrays.

    forward() de-normalizes the regression deltas, decodes them against
    the proposals, thresholds per-class confidences, applies NMS, and
    finally rescales the surviving boxes to the original image size.
    """

    def __init__(self, args):
        super(FasterRCNNBboxDecoder, self).__init__()
        # Per-coordinate std used to de-normalize the regression deltas,
        # tiled once per class: shape (1, (num_classes + 1) * 4).
        self.std    = torch.Tensor([0.1, 0.1, 0.2, 0.2]).repeat(args.num_classes + 1)[None]
        if args.cuda:
            self.std    = self.std.to(args.device)
        # Classes including the background class at index 0.
        self.num_classes    = args.num_classes + 1

    def frcnn_correct_boxes(self, box_xy, box_wh, input_shape, image_shape):
        """Map normalized center/size boxes to pixel corners on the image.

        Returns boxes in (ymin, xmin, ymax, xmax) order, scaled by the
        image height/width. Working in (y, x) order keeps the scale
        vector a simple (h, w, h, w) tile.
        """
        box_yx = box_xy[..., ::-1]
        box_hw = box_wh[..., ::-1]
        # NOTE(review): input_shape is converted but not otherwise used here
        # (no letterbox correction) — confirm upstream preprocessing.
        input_shape = np.array(input_shape)
        image_shape = np.array(image_shape)

        half            = box_hw / 2.
        top_left        = box_yx - half
        bottom_right    = box_yx + half
        boxes  = np.concatenate([top_left, bottom_right], axis=-1)
        boxes *= np.concatenate([image_shape, image_shape], axis=-1)
        return boxes

    def forward(self, output, image_shape, input_shape, nms_iou = 0.3, confidence = 0.5):
        """Decode one batch of head outputs into detection arrays.

        output: (roi_cls_locs, roi_scores, rois, _) from the model.
        Returns a list with one entry per image: an ndarray of rows
        [box(4), confidence, class_index] or an empty list when nothing
        clears the confidence threshold.
        """
        roi_cls_locs, roi_scores, rois, _ = output
        results = []
        bs      = len(roi_cls_locs)
        # Reshape proposals to (batch_size, num_rois, 4).
        rois    = rois.view((bs, -1, 4))
        # One pass per image (predict-time batches are usually size 1).
        for b in range(bs):
            # De-normalize deltas, then group per class:
            # (num_rois, num_classes * 4) -> (num_rois, num_classes, 4).
            deltas = (roi_cls_locs[b] * self.std).view([-1, self.num_classes, 4])

            # Broadcast each proposal across all classes and decode:
            # num_rois, 4 -> num_rois, 1, 4 -> num_rois, num_classes, 4.
            roi         = rois[b].view((-1, 1, 4)).expand_as(deltas)
            cls_bbox    = loc2bbox(roi.contiguous().view((-1, 4)), deltas.contiguous().view((-1, 4)))
            cls_bbox    = cls_bbox.view([-1, (self.num_classes), 4])
            # Normalize box corners to [0, 1] relative to network input size.
            cls_bbox[..., [0, 2]] = (cls_bbox[..., [0, 2]]) / input_shape[1]
            cls_bbox[..., [1, 3]] = (cls_bbox[..., [1, 3]]) / input_shape[0]

            prob = F.softmax(roi_scores[b], dim=-1)

            results.append([])
            # Class 0 is background: skip it.
            for c in range(1, self.num_classes):
                # Keep only boxes whose score for class c clears the threshold.
                c_confs     = prob[:, c]
                c_confs_m   = c_confs > confidence

                if len(c_confs[c_confs_m]) > 0:
                    boxes_to_process = cls_bbox[c_confs_m, c]
                    confs_to_process = c_confs[c_confs_m]

                    # Non-maximum suppression within this class.
                    keep = nms(boxes_to_process, confs_to_process, nms_iou)
                    good_boxes  = boxes_to_process[keep]
                    confs       = confs_to_process[keep][:, None]
                    # Label column holds the zero-based foreground class id.
                    ones = torch.ones((len(keep), 1))
                    if confs.is_cuda:
                        ones = ones.cuda()
                    labels = (c - 1) * ones
                    # Rows: [box(4), confidence, label].
                    c_pred = torch.cat((good_boxes, confs, labels), dim=1).cpu().numpy()
                    results[-1].extend(c_pred)

            if len(results[-1]) > 0:
                results[-1] = np.array(results[-1])
                # Convert corners to center/size, then rescale to the image.
                box_xy = (results[-1][:, 0:2] + results[-1][:, 2:4]) / 2
                box_wh = results[-1][:, 2:4] - results[-1][:, 0:2]
                results[-1][:, :4] = self.frcnn_correct_boxes(box_xy, box_wh, input_shape, image_shape)

        return results