import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.bbox import bbox_overlaps, min_area_square,rbox_2_quad
from utils.box_coder import BoxCoder
from utils.overlaps.rbox_overlaps import rbox_overlaps

from shapely.geometry import Polygon,MultiPoint  #多边形
from tqdm import tqdm


def skewiou(box1, box2):
    """Compute the skew (rotated) IoU between two quadrilaterals.

    Args:
        box1, box2: array-like with 8 values, interpreted as four (x, y)
            corner points of a quadrilateral.

    Returns:
        tuple (iou, inter): intersection-over-union and raw intersection
        area of the two convex hulls; (0, 0) for invalid or degenerate
        polygons.
    """
    pts1 = np.asarray(box1).reshape(4, 2)
    pts2 = np.asarray(box2).reshape(4, 2)
    poly1 = Polygon(pts1).convex_hull
    poly2 = Polygon(pts2).convex_hull
    if not poly1.is_valid or not poly2.is_valid:
        print('formatting errors for boxes!!!! ')
        # Bug fix: previously returned a bare scalar 0 here, which broke
        # callers that unpack `iou, inter = skewiou(...)`.
        return 0, 0
    if poly1.area == 0 or poly2.area == 0:
        return 0, 0
    # convex_hull already yields Polygon objects; no need to re-wrap them.
    inter = poly1.intersection(poly2).area
    union = poly1.area + poly2.area - inter
    if union == 0:
        return 0, 0
    return inter / union, inter
    
def xyxy2xywh_a(query_boxes):
    """Convert boxes from [x1, y1, x2, y2, ...] to [cx, cy, w, h, ...].

    Any extra trailing columns (e.g. an angle) are copied through unchanged;
    the input array itself is not modified.
    """
    x1, y1, x2, y2 = (query_boxes[:, k] for k in range(4))
    converted = query_boxes.copy()
    converted[:, 0] = (x1 + x2) * 0.5
    converted[:, 1] = (y1 + y2) * 0.5
    converted[:, 2] = x2 - x1
    converted[:, 3] = y2 - y1
    return converted

# cuda_overlaps
class IntegratedLoss(nn.Module):
    """Combined focal classification loss + regression loss for rotated boxes.

    The regression criterion is selected by ``func``:
    'smooth' -> smooth_l1_loss, 'mse' -> F.mse_loss,
    'balanced' -> balanced_l1_loss, 'iou_smooth' -> IoU_smooth_l1_loss
    (element-wise L1 re-weighted per sample by its skew IoU).
    """
    def __init__(self, alpha=0.25, gamma=2.0, func = 'smooth'):
        # alpha / gamma: focal-loss balancing and focusing parameters.
        super(IntegratedLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.box_coder = BoxCoder()
        if func == 'smooth':
            self.criteron = smooth_l1_loss
        elif func == 'mse':
            self.criteron = F.mse_loss
        elif func == 'balanced':
            self.criteron = balanced_l1_loss
        elif func == 'iou_smooth':
            self.criteron = IoU_smooth_l1_loss
        # NOTE(review): any other `func` value leaves self.criteron unset, so
        # forward() would raise AttributeError -- confirm callers only pass
        # one of the four values above.

    def forward(self, classifications, regressions, anchors, refined_achors, annotations,iou_thres=0.5):
        """Compute classification and regression losses averaged over the batch.

        Args (shapes inferred from the indexing below -- confirm at call site):
            classifications: (batch, num_anchors, num_classes) per-class scores,
                presumed already sigmoid-activated (they are clamped, not
                passed through a sigmoid, here).
            regressions: (batch, num_anchors, k) predicted box deltas.
            anchors: (batch, num_anchors, 5) rotated anchors; rows are
                presumed (x, y, w, h, theta) -- TODO confirm.
            refined_achors: refined anchors, same layout as `anchors`; used
                only when the criterion is IoU_smooth_l1_loss.
            annotations: (batch, max_objects, 6) ground truth; rows padded
                with -1, last column is the class label.
            iou_thres: IoU threshold for assigning positive anchors.

        Returns:
            (loss_cls, loss_reg): 1-element tensors (mean over batch images).
        """
        cls_losses = []
        reg_losses = []
        batch_size = classifications.shape[0]
        for j in range(batch_size):
            classification = classifications[j, :, :]
            regression = regressions[j, :, :]
            bbox_annotation = annotations[j, :, :]
            # Annotations are padded with -1 rows up to the batch-wide max
            # object count; keep only the real (non-padding) ground truths.
            bbox_annotation = bbox_annotation[bbox_annotation[:, -1] != -1]
            if bbox_annotation.shape[0] == 0:
                # No ground truth in this image: contribute zero loss.
                cls_losses.append(torch.tensor(0).float().cuda())
                reg_losses.append(torch.tensor(0).float().cuda())
                continue
            classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
            # Axis-aligned overlap of minimum enclosing squares, used by
            # rbox_overlaps as a cheap pre-filter before the exact rotated IoU.
            indicator = bbox_overlaps(
                min_area_square(anchors[j, :, :]),
                min_area_square(bbox_annotation[:, :-1])
            )
            # Exact rotated-box IoU matrix, presumed (num_anchors, num_gt).
            ious = rbox_overlaps(
                anchors[j, :, :].cpu().numpy(),
                bbox_annotation[:, :-1].cpu().numpy(),
                indicator.cpu().numpy(),
                thresh=1e-1
            )
            if not torch.is_tensor(ious):
                ious = torch.from_numpy(ious).cuda()
            # Best-matching ground truth for every anchor.
            iou_max, iou_argmax = torch.max(ious, dim=1)
            positive_indices = torch.ge(iou_max, iou_thres)
            # Guarantee every ground truth gets at least one positive anchor:
            # for gts whose best anchor is below the threshold, force that
            # best anchor to be positive.
            max_gt, argmax_gt = ious.max(0) 
            if (max_gt < iou_thres).any():
                positive_indices[argmax_gt[max_gt < iou_thres]]=1
              
            # cls loss
            # Start with all targets at -1 (ignored by the loss).
            cls_targets = (torch.ones(classification.shape) * -1).cuda()
            # Anchors below (iou_thres - 0.1) against every gt are negatives.
            cls_targets[torch.lt(iou_max, iou_thres - 0.1), :] = 0
            num_positive_anchors = positive_indices.sum()
            assigned_annotations = bbox_annotation[iou_argmax, :]
            # One-hot class targets for positives; the last annotation column
            # is the class label.
            cls_targets[positive_indices, :] = 0
            cls_targets[positive_indices, assigned_annotations[positive_indices, -1].long()] = 1
            # Focal loss: alpha-balanced, gamma-focused binary cross-entropy.
            alpha_factor = torch.ones(cls_targets.shape).cuda() * self.alpha
            alpha_factor = torch.where(torch.eq(cls_targets, 1.), alpha_factor, 1. - alpha_factor)
            focal_weight = torch.where(torch.eq(cls_targets, 1.), 1. - classification, classification)
            focal_weight = alpha_factor * torch.pow(focal_weight, self.gamma)
            bin_cross_entropy = -(cls_targets * torch.log(classification+1e-6) + (1.0 - cls_targets) * torch.log(1.0 - classification+1e-6))
            cls_loss = focal_weight * bin_cross_entropy 
            # Zero out ignored (-1) targets; normalize by the positive count.
            cls_loss = torch.where(torch.ne(cls_targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())
            cls_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0))
            # reg loss
            if positive_indices.sum() > 0:
                all_rois = anchors[j, positive_indices, :]
                gt_boxes = assigned_annotations[positive_indices, :]
                reg_targets = self.box_coder.encode(all_rois, gt_boxes)
                if self.criteron == IoU_smooth_l1_loss:
                    # IoU-weighted variant: compute the skew IoU of each
                    # refined anchor with its assigned ground truth, pairwise.
                    # NOTE (pitfall, from original author): rbox_overlaps
                    # returns the full cross-product IoU matrix rather than
                    # element-wise pairs, which is why an earlier variant
                    # needed a row-wise max; skewiou below is evaluated on
                    # aligned (gt_i, refined_anchor_i) pairs instead.
                    all_refine_anchors = refined_achors[j, positive_indices, :]
                    ious = []
                    for i in range(len(gt_boxes)):
                        iou, inter = skewiou(rbox_2_quad(gt_boxes[i, :-1].cpu().numpy(),mode='xyxya'),rbox_2_quad(all_refine_anchors[i,:].cpu().numpy(),mode='xyxya'))
                        ious.append(iou)
                    ious = np.array(ious)
                    reg_loss = self.criteron(regression[positive_indices, :], reg_targets,ious)
                else:
                    reg_loss = self.criteron(regression[positive_indices, :], reg_targets)
                reg_losses.append(reg_loss)
            else:
                reg_losses.append(torch.tensor(0).float().cuda())
        loss_cls = torch.stack(cls_losses).mean(dim=0, keepdim=True)
        loss_reg = torch.stack(reg_losses).mean(dim=0, keepdim=True)
        return loss_cls, loss_reg

    
def smooth_l1_loss(inputs,
                   targets,
                   beta=1. / 9,
                   size_average=True,
                   weight = None):
    """Smooth-L1 (Huber-style) loss.

    Adapted from https://github.com/facebookresearch/maskrcnn-benchmark

    Args:
        inputs, targets: tensors of identical shape (n, k).
        beta: transition point between the quadratic and linear regimes.
        size_average: return the mean when True, otherwise the sum.
        weight: optional (n, m) tensor; each row is reduced with max() and
            the result scales that row of the loss. Generalized here from a
            hard-coded ``repeat(1, 5)`` (which assumed exactly 5 regression
            columns) to any column count via ``expand_as``.

    Returns:
        0-dim tensor: mean or sum of the (optionally weighted) loss.
    """
    diff = torch.abs(inputs - targets)
    # Quadratic near zero, linear beyond beta (continuous at diff == beta).
    loss = torch.where(
        diff < beta,
        0.5 * diff ** 2 / beta,
        diff - 0.5 * beta
    )
    if weight is not None:
        # Per-row weight broadcast across all regression dimensions.
        loss = loss * weight.max(1)[0].unsqueeze(1).expand_as(loss)
    if size_average:
        return loss.mean()
    return loss.sum()


def balanced_l1_loss(inputs,
                     targets,
                     beta=1. / 9,
                     alpha=0.5,
                     gamma=1.5,
                     size_average=True):
    """Balanced L1 Loss
    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Promotes the gradient contribution of inliers (|error| < beta) relative
    to a plain L1 loss; outliers get a linear penalty.
    """
    assert beta > 0
    assert inputs.size() == targets.size() and targets.numel() > 0

    abs_err = torch.abs(inputs - targets)
    # b is chosen so the two branches meet continuously at abs_err == beta.
    b = np.e ** (gamma / alpha) - 1
    inlier = alpha / b * (b * abs_err + 1) * torch.log(b * abs_err / beta + 1) - alpha * abs_err
    outlier = gamma * abs_err + gamma / b - alpha * beta
    loss = torch.where(abs_err < beta, inlier, outlier)

    return loss.mean() if size_average else loss.sum()

def IoU_smooth_l1_loss(inputs,
                   targets,
                   ious,
                   beta=1. / 9,
                   size_average=True,
                   weight = None):
    """IoU-modulated L1 regression loss.

    Each row i (one positive sample) of the element-wise L1 error is scaled
    by alpha_i = exp(1 - |iou_i|) - 1, so poorly localized boxes (low skew
    IoU with their assigned ground truth) contribute more to the loss;
    alpha_i == 0 when the IoU is perfect.

    Args:
        inputs, targets: (n, k) predicted and target regression deltas.
        ious: array-like of n skew-IoU values, aligned with the rows.
        beta, weight: unused; kept for signature compatibility with the
            other criteria selectable in IntegratedLoss.
        size_average: divide the summed loss by n when True.

    Returns:
        0-dim tensor: sum over i,j of alpha_i * |inputs - targets|_{ij},
        divided by the row count when size_average is True.

    Fixes vs. the original: debug prints removed; the per-element Python
    double loop replaced by one vectorized expression; the weight tensor is
    created on inputs' device (the hard-coded .cuda() crashed on CPU).
    """
    diff = torch.abs(inputs - targets)
    with torch.no_grad():
        # alpha is a constant per row -- no gradient flows through the IoU.
        alpha_np = np.exp(1.0 - np.abs(np.asarray(ious, dtype=np.float64))) - 1.0
        alpha = torch.as_tensor(alpha_np, dtype=diff.dtype, device=diff.device)
    # Row-wise weighting broadcast over the k regression dimensions,
    # then summed over all elements: sum_i alpha_i * sum_j diff_ij.
    loss_iou = (alpha.unsqueeze(1) * diff).sum()
    if size_average:
        # Note: normalized by the number of rows (objects), not elements,
        # matching the original implementation.
        return loss_iou / len(diff)
    return loss_iou

# 