from functools import reduce
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from tensorflow.keras import layers

# @tf.function
def xywh2xyxy(bbox):
    """Convert boxes from center form (xc, yc, w, h) to corner form.

    bbox: tensor of shape (B, nx, ny, na, 4) holding [xc, yc, w, h].
    Returns a tensor of the same shape holding [xmin, ymin, xmax, ymax].
    """
    center = bbox[..., 0:2]
    half_size = bbox[..., 2:4] / 2
    return tf.concat([center - half_size, center + half_size], axis=-1)

# @tf.function
def bbox_square(bbox):
    """Area of corner-form boxes (xmin, ymin, xmax, ymax).

    bbox: tensor of shape (B, nx, ny, na, 4). Width and height are clamped
    at zero, so degenerate/inverted boxes contribute zero area.
    """
    width = tf.maximum(bbox[..., 2] - bbox[..., 0], 0.)
    height = tf.maximum(bbox[..., 3] - bbox[..., 1], 0.)
    return width * height

# @tf.function
def inner_bbox(bbox1,bbox2):
    """Intersection rectangle of two aligned box sets in corner form.

    bbox1, bbox2: tensors of shape (B, nx, ny, na, 4) as (xmin, ymin, xmax, ymax).
    When the boxes overlap, the returned corner ordering is always valid.
    When they do not, the corners may come out inverted (xmin > xmax or
    ymin > ymax) — that case is deliberately NOT handled here; the area
    computation downstream clamps it to zero.
    """
    top_left = tf.maximum(bbox1[..., 0:2], bbox2[..., 0:2])
    bottom_right = tf.minimum(bbox1[..., 2:4], bbox2[..., 2:4])
    return tf.concat([top_left, bottom_right], axis=-1)

# @tf.function
def outer_bbox(bbox1,bbox2):
    """Smallest rectangle enclosing both box sets (convex hull of the pair).

    bbox1, bbox2: tensors of shape (B, nx, ny, na, 4) as (xmin, ymin, xmax, ymax).
    """
    top_left = tf.minimum(bbox1[..., 0:2], bbox2[..., 0:2])
    bottom_right = tf.maximum(bbox1[..., 2:4], bbox2[..., 2:4])
    return tf.concat([top_left, bottom_right], axis=-1)

# @tf.function
def bbox_iou(bbox1,bbox2,xyxy=True,GIoU=False,DIoU=False,CIoU=False):
    """IoU / GIoU / DIoU / CIoU between two aligned sets of boxes.

    Follows the formulation used in ultralytics/yolov5.
    bbox1 -> ground truth, bbox2 -> prediction; trailing axis holds 4 coords.
    xyxy=True : boxes are (xmin, ymin, xmax, ymax); otherwise (xc, yc, w, h).
    CIoU takes precedence over DIoU, which takes precedence over GIoU.
    Returns a tensor shaped like the inputs with the trailing axis reduced.
    """
    if xyxy:
        b1_x1, b1_y1, b1_x2, b1_y2 = bbox1[...,0], bbox1[...,1], bbox1[...,2], bbox1[...,3]
        b2_x1, b2_y1, b2_x2, b2_y2 = bbox2[...,0], bbox2[...,1], bbox2[...,2], bbox2[...,3]
    else:
        b1_x1, b1_x2 = bbox1[...,0] - bbox1[...,2] / 2, bbox1[...,0] + bbox1[...,2] / 2
        b1_y1, b1_y2 = bbox1[...,1] - bbox1[...,3] / 2, bbox1[...,1] + bbox1[...,3] / 2
        b2_x1, b2_x2 = bbox2[...,0] - bbox2[...,2] / 2, bbox2[...,0] + bbox2[...,2] / 2
        b2_y1, b2_y2 = bbox2[...,1] - bbox2[...,3] / 2, bbox2[...,1] + bbox2[...,3] / 2

    # intersection area, clamped at 0 for non-overlapping boxes
    inter = K.maximum(K.minimum(b1_x2,b2_x2) - K.maximum(b1_x1,b2_x1), 0.0) *\
            K.maximum(K.minimum(b1_y2,b2_y2) - K.maximum(b1_y1,b2_y1), 0.0)
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + K.epsilon()
    # BUG FIX: h2 previously used b1_y1 instead of b2_y1, corrupting box-2's
    # height and therefore the union (and every *IoU variant built on it)
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + K.epsilon()
    union = w1 * h1 + w2 * h2 - inter + K.epsilon()

    iou = inter / union
    if CIoU or DIoU or GIoU:
        # width/height of the smallest enclosing box
        cw = K.maximum(K.maximum(b1_x2,b2_x2) - K.minimum(b1_x1,b2_x1), 0.0)
        ch = K.maximum(K.maximum(b1_y2,b2_y2) - K.minimum(b1_y1,b2_y1), 0.0)
        if CIoU or DIoU:
            c2 = cw ** 2 + ch ** 2 + K.epsilon()  # squared enclosing diagonal
            # squared distance between box centers
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + 
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2)/4
            if CIoU:
                # aspect-ratio consistency term; the yolov5 reference wraps
                # v and alpha in stop_gradient — here they stay in the graph
                v = (4 / math.pi ** 2) * K.pow(tf.atan2(w1,h1) - tf.atan2(w2,h2),2)
                alpha = v / (v - iou + ( 1 + K.epsilon() ))
                return iou - (rho2/c2 + v * alpha) #! CIoU
            return iou - rho2/c2 #! DIoU
        else:
            c_area = cw * ch + K.epsilon()
            return iou - (c_area - union) / c_area #! GIoU
    return iou #!IoU

def FocalLoss(loss_fn,gamma=1.5,alpha=0.25,reduction='none'):
    """Wrap an element-wise logit loss with focal-loss modulation.

    loss_fn   : element-wise loss taking (y_true, y_pred) where y_pred are logits.
    gamma     : focusing exponent; down-weights well-classified examples.
    alpha     : class-balance factor for the positive class.
    reduction : 'none' | 'sum' | anything else -> mean.
    """

    def FocalLossFixed(y_true,y_pred):
        base = loss_fn(y_true,y_pred)

        prob = tf.nn.sigmoid(y_pred)
        # probability the model assigns to the true class
        p_t = y_true * prob + (1 - y_true) * (1 - prob)
        alpha_factor = y_true * alpha  + (1 - y_true) * (1 - alpha)
        focal_factor = (1.0 - p_t) ** gamma
        weighted = base * (alpha_factor * focal_factor)

        if reduction == "none":
            return weighted
        # collapse the channel axis first, then reduce over the rest
        per_cell = tf.reduce_sum(weighted,axis=-1,keepdims=True)
        if reduction == "sum":
            return tf.reduce_sum(per_cell)
        return tf.reduce_mean(per_cell)  # mean

    return FocalLossFixed

def BCEWithLogitsLoss(pos_weight=None,reduction='none'):
    """Binary cross entropy on logits with an optional positive-class weight.

    pos_weight : multiplier applied to the loss of positive targets
                 (None means 1.0, i.e. unweighted).
    reduction  : 'none' | 'sum' | anything else -> mean.
    """
    weight = 1.0 if pos_weight is None else pos_weight

    def BCEWithLogitsLossFixed(y_true,y_pred):
        eps = K.epsilon()
        # clip to keep log() finite at the probability extremes
        prob = tf.clip_by_value(tf.nn.sigmoid(y_pred), eps, 1.0 - eps)
        bce = - y_true * tf.math.log(prob) - (1.0 - y_true) * tf.math.log(1.0 - prob)
        # scale the positive-target loss by `weight`, leave negatives as-is
        loss = bce * y_true * weight + bce * (1.0 - y_true)

        if reduction == "none":
            return loss
        # NOTE: unlike the sibling loss factories, no per-cell axis sum here
        if reduction == "sum":
            return tf.reduce_sum(loss)
        return tf.reduce_mean(loss)  # mean

    return BCEWithLogitsLossFixed

def MyBCEWithLogitsLoss(masks,pos_weight=None,reduction='none'):
    """BCE on logits where the positive weighting is driven by an external
    mask tensor instead of by y_true.

    masks      : tensor broadcastable to the loss; 1 where the positive
                 weight applies, 0 elsewhere.
    pos_weight : multiplier for masked positions (None means 1.0).
    reduction  : 'none' | 'sum' | anything else -> mean.
    Expected input shape: (B, nx*ny, na, nc).
    """
    weight = 1.0 if pos_weight is None else pos_weight

    def BCEWithLogitsLossFixed(y_true,y_pred):
        eps = K.epsilon()
        # clip to keep log() finite at the probability extremes
        prob = tf.clip_by_value(tf.nn.sigmoid(y_pred), eps, 1.0 - eps)
        bce = - y_true * tf.math.log(prob) - (1.0 - y_true) * tf.math.log(1.0 - prob)
        loss = bce * masks * weight + bce * (1.0 - masks)

        if reduction == "none":
            return loss
        loss = tf.reduce_sum(loss,axis=-1,keepdims=True)
        if reduction == "sum":
            return tf.reduce_sum(loss)
        return tf.reduce_mean(loss)  # mean

    return BCEWithLogitsLossFixed

def MyLoss(pos_weight,reduction='none'):
    """BCE on a piecewise-rescaled sigmoid probability.

    Probabilities at or below `pos_weight` are mapped linearly onto
    [0, 0.5]; the remainder onto [0.5, 1.0]. The effect is that the
    decision threshold sits at `pos_weight` rather than at 0.5.

    pos_weight : probability value remapped to 0.5 (must be in (0, 1)).
    reduction  : 'none' | 'sum' | anything else -> mean.
    Expected input shape: (B, nx*ny, na, nc).
    """

    def BCEWithLogitsLossFixed(y_true,y_pred):
        eps = K.epsilon()
        prob = tf.clip_by_value(tf.nn.sigmoid(y_pred), eps, 1.0 - eps)
        # two linear segments of the remapping
        below = prob / pos_weight * 0.5
        above = 0.5 + 0.5 * (prob - pos_weight) / (1.0 - pos_weight)
        prob = tf.where(tf.less_equal(prob, pos_weight), below, above)
        loss = - y_true * tf.math.log(prob) - (1.0 - y_true) * tf.math.log(1.0 - prob)

        if reduction == "none":
            return loss
        loss = tf.reduce_sum(loss,axis=-1,keepdims=True)
        if reduction == "sum":
            return tf.reduce_sum(loss)
        return tf.reduce_mean(loss)  # mean

    return BCEWithLogitsLossFixed


def get_positive_weight(trainset):
    """Estimate the mean fraction of positive grid cells over a training set.

    trainset yields (images, labels_list) pairs where labels_list holds
    three feature levels, each shaped (B, nx*ny, na, c) with an objectness
    mask at channel 4. A cell counts as positive if ANY anchor marks it.
    For every sample, the positive-cell ratio is computed per level, the
    three per-level ratios are summed, and the mean over all samples is
    returned.

    NOTE(review): grid sizes `nxys` and anchor count `na` are taken from
    the first batch only — assumes they are constant across the dataset.
    """
    positives_ratios = []
    for i,(images,labels_list) in enumerate(trainset):
        if i==0:
            b,nxy_0,na,_ = labels_list[0].shape
            _,nxy_1,__,_ = labels_list[1].shape
            _,nxy_2,__,_ = labels_list[2].shape
            nxys = [nxy_0,nxy_1,nxy_2]
        else:
            b = labels_list[0].shape[0]  # only the batch size may vary

        totals = [np.ones((b,nxy)) for nxy in nxys]
        positives = [np.zeros((b,nxy)) for nxy in nxys]
        for l,labels in enumerate(labels_list): #! (b,nx*ny,na,c)
            for ai in range(na):
                masks = labels[:,:,ai,4]
                # a cell is positive if at least one anchor flags it
                # (fixed: np.bool alias was removed in NumPy >= 1.24)
                positives[l] = np.where(masks.astype(bool),totals[l],positives[l])

        # BUG FIX: this block used to sit inside the per-level loop above,
        # appending partially-filled ratios len(labels_list) times per batch.
        # Compute the (b, 3) ratio matrix once per batch, after all levels.
        ratio = np.concatenate([
            (np.sum(positives[l],axis=1) / np.sum(totals[l],axis=1))[:,None]
            for l in range(len(labels_list))
        ],axis=1)
        positives_ratios.append(ratio)
    # np.vstack replaces np.row_stack (removed in NumPy 2.0)
    positives_ratios = np.vstack(positives_ratios).sum(axis=1)
    return positives_ratios.mean()