import math
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
#! import project-local box-geometry helpers
from metric_utils import cxcywh_to_xyxy, xyxy_to_cxcywh, cxcywh_to_gcxcywh, gcxcywh_to_cxcywh, find_jaccard_overlap

class MultiBoxLoss:
    """Base container for MultiBox-style detection losses.

    Stores the prior (anchor) boxes in both coordinate formats together with
    the matching threshold and the hard-negative mining ratio used by the
    concrete loss subclasses.
    """

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3):
        """
        Args:
            priors_cxcy: prior boxes in `cx, cy, w, h` format.
            threshold: IoU cutoff separating foreground from background priors.
            neg_pos_ratio: number of hard negatives kept per positive prior.
        """
        self.threshold = threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.priors_cxcy = priors_cxcy
        # The same priors converted once to corner format `x1, y1, x2, y2`.
        self.priors_xy = cxcywh_to_xyxy(priors_cxcy)
 
class SSDSSDLoss(MultiBoxLoss):
    """SSD MultiBox loss: Smooth-L1 localization + hard-negative-mined confidence loss."""

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
        """
        Args:
            priors_cxcy: priors in `cx, cy, w, h` format, shape (n_priors, 4).
            threshold: IoU threshold below which a prior is labelled background.
            neg_pos_ratio: hard negatives kept per positive prior.
            alpha: localization-loss weight; the two losses are returned
                   separately, so the caller combines `conf_loss + alpha * loc_loss`.
        """
        super(SSDSSDLoss, self).__init__(priors_cxcy, threshold, neg_pos_ratio)
        self.alpha = alpha
        #! FIX: the SSD localization loss is Smooth L1; Huber(delta=1.0) is
        #! exactly Smooth L1, whereas the previous MeanAbsoluteError was plain L1.
        self.smooth_l1 = keras.losses.Huber(delta=1.0)
        self.cross_entropy = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

    def __call__(self, predicted_locs, predicted_scores, boxes, labels):
        """Compute the SSD loss for one batch.

        Args:
            predicted_locs:   (N, n_priors, 4) predicted offsets, `gcx, gcy, gw, gh` format.
            predicted_scores: (N, n_priors, n_classes) class logits.
            boxes:  per-image ground-truth boxes in `x1, y1, x2, y2` format.
            labels: per-image ground-truth class ids (0 = background).

        Returns:
            (conf_loss, loc_loss): scalar confidence loss and scalar localization loss.
        """
        batch_size = predicted_locs.shape[0]
        n_priors = self.priors_cxcy.shape[0]

        assert n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]

        true_locs = tf.zeros((batch_size, n_priors, 4))  #! (N, n_priors, 4)
        true_classes = tf.zeros((batch_size, n_priors))  #! (N, n_priors)

        #! Match every prior to a ground-truth object, image by image.
        for i in range(batch_size):
            n_objects = boxes[i].shape[0]
            #! (n_objects, n_priors); priors must be compared in corner format.
            overlap = find_jaccard_overlap(boxes[i], self.priors_xy)

            #! For each prior: best-overlapping object and that overlap value.
            overlap_for_each_prior = tf.reduce_max(overlap, axis=0)  #! (n_priors,)
            object_for_each_prior = tf.argmax(overlap, axis=0)       #! (n_priors,)

            #! For each object: its best-overlapping prior. That overlap may be
            #! below the threshold, and several objects may pick the same prior.
            prior_for_each_object = tf.argmax(overlap, axis=1)       #! (n_objects,)

            #! Force-match: every object claims its best prior, so no object is
            #! left without at least one positive prior.
            indices = tf.range(n_objects, dtype=object_for_each_prior.dtype)
            object_for_each_prior = tf.tensor_scatter_nd_update(
                object_for_each_prior, prior_for_each_object[:, None], indices)

            #! Artificially set those priors' overlap to 1 so they always pass
            #! the background cutoff below.
            overlap_for_each_prior = tf.tensor_scatter_nd_update(
                overlap_for_each_prior,
                prior_for_each_object[:, None],
                tf.ones_like(indices, dtype=overlap_for_each_prior.dtype))

            #! Per-prior labels; low-overlap priors become background (class 0).
            #! FIX: use self.threshold instead of the hard-coded 0.5.
            label_for_each_prior = tf.gather(labels[i], object_for_each_prior)
            label_for_each_prior = tf.where(
                tf.less(overlap_for_each_prior, self.threshold), 0, label_for_each_prior)

            #! Write this image's class targets.
            true_classes = tf.tensor_scatter_nd_update(
                true_classes, [[i]],
                tf.cast(label_for_each_prior[None], dtype=true_classes.dtype))

            #! Encode each prior's matched ground-truth box as regression offsets.
            prior_boxes = tf.cast(cxcywh_to_gcxcywh(
                xyxy_to_cxcywh(tf.gather(boxes[i], object_for_each_prior)),
                tf.cast(self.priors_cxcy, dtype=boxes[i].dtype)), dtype=true_locs.dtype)

            #! Write this image's localization targets.
            true_locs = tf.tensor_scatter_nd_update(true_locs, [[i]], prior_boxes[None])

        positive_priors = true_classes != 0  #! (N, n_priors) boolean mask of positives

        #! Localization loss over positive priors only.
        loc_loss = self.smooth_l1(
            tf.gather_nd(true_locs, tf.where(positive_priors)),      #! (No, 4)
            tf.gather_nd(predicted_locs, tf.where(positive_priors))  #! (No, 4)
        )  #! scalar

        #! At least one positive exists per image thanks to the force-matching above.
        n_positives = tf.reduce_sum(tf.cast(positive_priors, dtype=predicted_scores.dtype), axis=1)  #! (N,)
        n_hard_negatives = tf.cast(self.neg_pos_ratio * n_positives, dtype=tf.int64)                 #! (N,)

        conf_loss_all = self.cross_entropy(true_classes, predicted_scores)  #! (N, n_priors)

        #! FIX: mask with tf.where instead of tensor_scatter_nd_update sized by
        #! tf.where(...).shape[0], which is None under tf.function/graph mode.
        zeros = tf.zeros_like(conf_loss_all)
        conf_loss_pos = tf.where(positive_priors, conf_loss_all, zeros)
        conf_loss_neg = tf.where(positive_priors, zeros, conf_loss_all)

        #! Hard-negative mining: keep the neg_pos_ratio * n_positives largest
        #! negative losses of each image.
        conf_loss_sorted_neg = tf.sort(conf_loss_neg, direction="DESCENDING", axis=-1)
        #! FIX: rank-based top-k selection; the old value-threshold gather could
        #! drop ties and indexed out of range when n_hard_negatives == n_priors.
        hardness_ranks = tf.range(n_priors, dtype=tf.int64)[None, :]  #! (1, n_priors)
        hard_neg_mask = hardness_ranks < n_hard_negatives[:, None]    #! (N, n_priors)
        conf_loss_hard_neg = tf.where(hard_neg_mask, conf_loss_sorted_neg,
                                      tf.zeros_like(conf_loss_sorted_neg))

        #! Normalized by the total positive count, so images with many objects
        #! dominate the batch loss.
        conf_loss = (tf.reduce_sum(conf_loss_hard_neg) + tf.reduce_sum(conf_loss_pos)) / tf.reduce_sum(n_positives)

        return conf_loss, loc_loss

class YOLOSSDLoss(MultiBoxLoss):
    """SSD-style matching with an IoU-variant localization loss (YOLO-flavoured)."""

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=3.):
        """
        Args:
            priors_cxcy: priors in `cx, cy, w, h` format.
            threshold: IoU threshold below which a prior is labelled background.
            neg_pos_ratio: hard negatives kept per positive prior.
            alpha: localization-loss weight; losses are returned separately.

        Note: `predicted_scores` passed to __call__ are logits, hence
        `from_logits=True` on the classification loss.
        """
        super(YOLOSSDLoss, self).__init__(priors_cxcy, threshold, neg_pos_ratio)
        self.alpha = alpha
        #! Classification (confidence) loss, per-element so we can mine negatives.
        self.cross_entropy = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

    def bbox_iou(self, bbox1, bbox2, xyxy=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False):
        """Element-wise IoU (or GIoU/DIoU/CIoU/EIoU) between aligned box tensors.

        Args:
            bbox1, bbox2: (..., 4) boxes; corner `x1,y1,x2,y2` when xyxy is
                          True, otherwise center `cx,cy,w,h`.
        Returns:
            (...,) tensor of the selected IoU variant.
        """
        if xyxy:
            b1_x1, b1_y1, b1_x2, b1_y2 = bbox1[..., 0], bbox1[..., 1], bbox1[..., 2], bbox1[..., 3]
            b2_x1, b2_y1, b2_x2, b2_y2 = bbox2[..., 0], bbox2[..., 1], bbox2[..., 2], bbox2[..., 3]
        else:
            b1_x1, b1_x2 = bbox1[..., 0] - bbox1[..., 2] / 2, bbox1[..., 0] + bbox1[..., 2] / 2
            b1_y1, b1_y2 = bbox1[..., 1] - bbox1[..., 3] / 2, bbox1[..., 1] + bbox1[..., 3] / 2
            b2_x1, b2_x2 = bbox2[..., 0] - bbox2[..., 2] / 2, bbox2[..., 0] + bbox2[..., 2] / 2
            b2_y1, b2_y2 = bbox2[..., 1] - bbox2[..., 3] / 2, bbox2[..., 1] + bbox2[..., 3] / 2

        #! Intersection area, clamped at 0 for disjoint boxes.
        inter = K.maximum(K.minimum(b1_x2, b2_x2) - K.maximum(b1_x1, b2_x1), 0.0) *\
                K.maximum(K.minimum(b1_y2, b2_y2) - K.maximum(b1_y1, b2_y1), 0.0)
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + K.epsilon()
        #! FIX: bbox2's height previously used b1_y1 (`b2_y2 - b1_y1`),
        #! corrupting the union and every IoU variant. Must be b2_y2 - b2_y1.
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + K.epsilon()
        union = w1 * h1 + w2 * h2 - inter + K.epsilon()

        iou = inter / union
        if EIoU or CIoU or DIoU or GIoU:
            #! Width/height of the smallest enclosing box.
            cw = K.maximum(K.maximum(b1_x2, b2_x2) - K.minimum(b1_x1, b2_x1), 0.0)
            ch = K.maximum(K.maximum(b1_y2, b2_y2) - K.minimum(b1_y1, b2_y1), 0.0)
            if EIoU or CIoU or DIoU:
                c2 = cw ** 2 + ch ** 2 + K.epsilon()  #! enclosing-box diagonal²
                #! Squared distance between the two box centers.
                rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                        (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
                if CIoU:
                    #! Aspect-ratio consistency term (v) and its trade-off weight.
                    v = (4 / math.pi ** 2) * K.pow(tf.atan2(w1, h1) - tf.atan2(w2, h2), 2)
                    alpha = v / (v - iou + (1 + K.epsilon()))
                    return iou - (rho2 / c2 + v * alpha)  #! CIoU
                elif EIoU:
                    #! FIX: EIoU penalizes width/height *differences*
                    #! ((w2-w1)²/cw², (h2-h1)²/ch² per Zhang et al. 2021), not the
                    #! center offsets used before; epsilon guards zero-size spans.
                    rho2_w = (w2 - w1) ** 2
                    rho2_h = (h2 - h1) ** 2
                    return iou - (rho2 / c2
                                  + rho2_w / (cw ** 2 + K.epsilon())
                                  + rho2_h / (ch ** 2 + K.epsilon()))  #! EIoU
                return iou - rho2 / c2  #! DIoU
            else:
                c_area = cw * ch + K.epsilon()
                return iou - (c_area - union) / c_area  #! GIoU
        return iou  #! plain IoU

    def __call__(self, predicted_locs, predicted_scores, boxes, labels):
        """Compute the loss for one batch.

        Args:
            predicted_locs:   (N, n_priors, 4) offsets in `gcx, gcy, gw, gh`;
                              decoded here to relative `x1, y1, x2, y2` in [0, 1].
            predicted_scores: (N, n_priors, n_classes) class logits.
            boxes:  per-image ground-truth boxes, relative `x1, y1, x2, y2`.
            labels: per-image ground-truth class ids (0 = background).

        Returns:
            (conf_loss, loc_loss): scalar confidence and localization losses.
        """
        batch_size = predicted_locs.shape[0]
        n_priors = self.priors_cxcy.shape[0]

        assert n_priors == predicted_locs.shape[1] == predicted_scores.shape[1]

        true_locs = tf.zeros((batch_size, n_priors, 4))  #! (N, n_priors, 4)
        true_classes = tf.zeros((batch_size, n_priors))  #! (N, n_priors)

        #! Match every prior to a ground-truth object, image by image.
        for i in range(batch_size):
            n_objects = boxes[i].shape[0]
            overlap = find_jaccard_overlap(boxes[i], self.priors_xy)  #! (n_objects, n_priors)

            overlap_for_each_prior = tf.reduce_max(overlap, axis=0)  #! (n_priors,)
            object_for_each_prior = tf.argmax(overlap, axis=0)       #! (n_priors,)
            prior_for_each_object = tf.argmax(overlap, axis=1)       #! (n_objects,)

            #! Force-match every object to its best prior so each object gets
            #! at least one positive.
            indices = tf.range(n_objects, dtype=object_for_each_prior.dtype)
            object_for_each_prior = tf.tensor_scatter_nd_update(
                object_for_each_prior, prior_for_each_object[:, None], indices)

            #! Set those priors' overlap to 1 so they always pass the cutoff.
            overlap_for_each_prior = tf.tensor_scatter_nd_update(
                overlap_for_each_prior,
                prior_for_each_object[:, None],
                tf.ones_like(indices, dtype=overlap_for_each_prior.dtype))

            #! Per-prior labels; low-overlap priors become background (class 0).
            #! FIX: use self.threshold instead of the hard-coded 0.5.
            label_for_each_prior = tf.gather(labels[i], object_for_each_prior)
            label_for_each_prior = tf.where(
                tf.less(overlap_for_each_prior, self.threshold), 0, label_for_each_prior)

            true_classes = tf.tensor_scatter_nd_update(
                true_classes, [[i]],
                tf.cast(label_for_each_prior[None], dtype=true_classes.dtype))

            #! Targets stay in corner format: the localization loss is IoU-based.
            prior_boxes = tf.cast(tf.gather(boxes[i], object_for_each_prior), dtype=true_locs.dtype)
            true_locs = tf.tensor_scatter_nd_update(true_locs, [[i]], prior_boxes[None])

        #! Decode predictions from offset format to corner format.
        predicted_locs = cxcywh_to_xyxy(gcxcywh_to_cxcywh(predicted_locs, self.priors_cxcy))

        positive_priors = true_classes != 0  #! (N, n_priors) positives mask

        #! IoU-variant localization loss over positive priors only.
        iou = self.bbox_iou(
            tf.gather_nd(true_locs, tf.where(positive_priors)),       #! (No, 4)
            tf.gather_nd(predicted_locs, tf.where(positive_priors)),  #! (No, 4)
            EIoU=True
        )  #! (No,)
        loc_loss = tf.reduce_mean(1.0 - iou)

        n_positives = tf.reduce_sum(tf.cast(positive_priors, dtype=predicted_scores.dtype), axis=1)  #! (N,)
        n_hard_negatives = tf.cast(self.neg_pos_ratio * n_positives, dtype=tf.int64)                 #! (N,)

        conf_loss_all = self.cross_entropy(true_classes, predicted_scores)  #! (N, n_priors)

        #! FIX: mask with tf.where instead of tensor_scatter_nd_update sized by
        #! tf.where(...).shape[0], which is None under tf.function/graph mode.
        zeros = tf.zeros_like(conf_loss_all)
        conf_loss_pos = tf.where(positive_priors, conf_loss_all, zeros)
        conf_loss_neg = tf.where(positive_priors, zeros, conf_loss_all)

        #! Hard-negative mining: keep the neg_pos_ratio * n_positives largest
        #! negative losses of each image.
        conf_loss_sorted_neg = tf.sort(conf_loss_neg, direction="DESCENDING", axis=-1)
        #! FIX: rank-based top-k selection; the old value-threshold gather could
        #! drop ties and indexed out of range when n_hard_negatives == n_priors.
        hardness_ranks = tf.range(n_priors, dtype=tf.int64)[None, :]  #! (1, n_priors)
        hard_neg_mask = hardness_ranks < n_hard_negatives[:, None]    #! (N, n_priors)
        conf_loss_hard_neg = tf.where(hard_neg_mask, conf_loss_sorted_neg,
                                      tf.zeros_like(conf_loss_sorted_neg))

        #! Normalized by the total positive count, so images with many objects
        #! dominate the batch loss.
        conf_loss = (tf.reduce_sum(conf_loss_hard_neg) + tf.reduce_sum(conf_loss_pos)) / tf.reduce_sum(n_positives)

        return conf_loss, loc_loss