# Fonction de coût
import keras.backend as K
import tensorflow as tf
import tensorflow.contrib.slim as slim
from config import ignore_thresh, scale_num, model_type, MAX_BOX_NUM

from detect_function import yolo_head,order_matrix_net


def compute_loss(yolo_outputs, y_true, anchors,num_classes,is_training, ignore_thresh=ignore_thresh, print_loss=False):
    """
        Return the total YOLO loss tensor plus auxiliary diagnostics.

    :param yolo_outputs: list of `scale_num` raw network outputs, one per scale
        (e.g. (N,13,13,...), (N,26,26,...)); channel layout inferred from the
        slicing below: 0:2 xy, 2:4 wh, 4:6 seed xy, 6:7 confidence, 7: classes
        — NOTE(review): confirm against the network definition.
    :param y_true: list of `scale_num` label arrays; channels used here:
        0:2 box xy, 2:4 box wh (relative to full image), 4:6 seed xy,
        6:7 objectness mask, 7:8 order index, 8: one-hot class probs.
    :param anchors: array, shape=(T, 2), anchor wh in pixels
    :param num_classes: number of object classes (forwarded to yolo_head)
    :param is_training: flag forwarded to order_matrix_net
    :param ignore_thresh: float, IoU threshold below which a no-object cell
        still incurs confidence loss
    :param print_loss: if True, wrap `loss` in tf.Print for per-step logging
    :return: (loss, wrong_order, accuracy, total_loss, log_data)
        loss: scalar total loss; wrong_order: avg mis-ordered pair count;
        accuracy: class accuracy — NOTE(review): computed from the LAST scale
        of the loop only; total_loss: [xy, wh, conf, class, seed, order]
        summed over scales; log_data: debug tensors (currently always empty).
    """
    
    # yolo_outputs = YOLO_outputs
    # y_true = Y_true  # output of preprocess_true_boxes [3, None, 13, 13, 3, 2]
    # Anchor indices per scale; model_type 'N' presumably means the 3-scale
    # variant, anything else the 2-scale one — TODO confirm against config.
    if model_type=='N':
        anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    else:
        anchor_mask = [[3, 4, 5], [0, 1, 2]]

    # Network input size in pixels: coarsest grid * stride 32, e.g. [416 416].
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0])) # [416 416]
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(scale_num)] # [(13 13)(26 26)(52 52)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))
    # One TensorArray per scale collecting [num_true,5] (xywh + order) per sample,
    # consumed later by collect_scale.
    scale_pred_box_list=[tf.TensorArray(tf.float32, size=1, dynamic_size=True,infer_shape=False)
                         for i in range(scale_num)]
    
    # Per-scale component losses, summed into total_loss after the loop.
    xy_loss_total=[]
    wh_loss_total=[]
    seed_loss_total=[]
    class_loss_total=[]
    conf_loss_total=[]
    log_data=[]
    for l in range(scale_num):
        # [m,13,13,3,1]
        object_mask = y_true[l][..., 6:7]
        true_class_probs = y_true[l][..., 8:]

        # pred_xy, pred_wh follow the label convention; yolo_head converts the
        # paper's format (offsets relative to the current grid cell) into the
        # label format (position relative to the full image).
        # sigmoid(raw_pred) is the position relative to the cell.
        # yolo_outputs[l]=tf.Print(yolo_outputs[l],[yolo_outputs[l]],'yolo_outputs[%d]'%(l))
        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]], num_classes,input_shape, calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh]) # [None, 13, 13, 3, 4]

        # Position relative to the cell, range [0,1]: ground truth for sigmoid(tx,ty).
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid   # target for sigmoid(tx,ty)
        # raw_true_seed need not lie in [0,1]: conf=1 is assigned from the box
        # xy coordinates, not from the seed point.
        raw_true_seed = y_true[l][..., 4:6] * grid_shapes[l][::-1] - grid   # target for the seed offsets
        # e^(raw_true_wh) is the size relative to the anchor.
        raw_true_wh = K.log(y_true[l][..., 2:4]* input_shape[::-1] / anchors[anchor_mask[l]]) # target for tw,th
        raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh))  # avoid log(0)=-inf; y_true contains many zeros
        # Weight in (1,2]: small boxes (area near 0) get a larger loss weight.
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
        # if l==1:
        #     log_data.append(raw_pred)
        #     log_data.append(box_loss_scale)
        #     log_data.append(raw_true_xy)
        #     log_data.append(raw_true_seed)

        # Find ignore mask, iterate over each of batch.  after created, it's empty
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask,scale_pred_box):
            # true_box: the boxes of sample b whose centre falls in a grid cell
            # and which were assigned to one of this scale's anchors (best-IoU
            # anchor); mask picks the non-zero entries out of 13x13x3.
            # true_box[...,:2] is the position relative to the full image.
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0]) #[num__true,4]
            # [13, 13, 3, 4] masked by [13, 13, 3]  -> [num__true,4]
            true_pred = tf.boolean_mask(pred_box[b], object_mask_bool[b, ..., 0])
            order_pred = tf.boolean_mask(y_true[l][b, ..., 7:8], object_mask_bool[b, ..., 0]) #[num__true,1]
            true_pred=tf.concat([true_pred,order_pred],axis=-1) #[num__true,5]
            # NOTE(review): the third positional argument of TensorArray.write
            # is `name`; passing K.dtype(true_box) here only sets an op name —
            # confirm this was intended.
            scale_pred_box=scale_pred_box.write(b, true_pred, K.dtype(true_box))
            
            # After yolo_head, pred_box[...,:2] is the position relative to the full image.
            iou,_ = box_IoU(pred_box[b], true_box) # [13, 13, 3, 4] x [num__true,4] -> [13,13,3,num__true]
            # When this scale has no true boxes, num__true may be 0 and
            # best_iou becomes -inf.
            # Max IoU of each anchor slot against all ground truths.
            best_iou = K.max(iou, axis=-1) #[13,13,3]
            # best_iou=tf.Print(best_iou,[best_iou],'best_iou')
            # A non-best cell is only pushed towards "no object" when its IoU with
            # every ground truth (hence the max) is below the threshold.
            # Positions with ignore_mask=1 are optimised towards 0 (-inf < thresh
            # falls in this class); positions with ignore_mask=0 are left alone.
            ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_thresh, K.dtype(true_box))) #[13,13,3]
            return b + 1, ignore_mask,scale_pred_box

        _, ignore_mask,scale_pred_box_list[l] = tf.while_loop(lambda b, *args: b < m, loop_body,
                                                       [0, ignore_mask,scale_pred_box_list[l]])
        ignore_mask = ignore_mask.stack()  # [m,13,13,3]
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.   [None, 13, 13, 3, 2] [None, 13, 13, 3, 2]
        #  binary_crossentropy: first term is the label; sigmoid (from_logits=True) of the second term is the prediction
        # Special use of cross entropy: the label is a continuous value in [0,1]
        # rather than strictly 0/1 — which is the original meaning of cross entropy.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2],
                                                                       from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4])
        # seed_loss = object_mask* box_loss_scale * K.binary_crossentropy(raw_true_seed, raw_pred[..., 4:6],
        #                                                                from_logits=True)
        seed_loss = object_mask* box_loss_scale * K.square(raw_true_seed-raw_pred[..., 4:6])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 6:7], from_logits=True) + \
                          (1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 6:7],
                                                                    from_logits=True) * ignore_mask
        
        # feature_scale_list.append(feature)
        # feature = tf.boolean_mask(yolo_outputs[l][...,5:], object_mask_bool[..., 0])  # [num__true,10]
        # pred_box=tf.boolean_mask(pred_box, object_mask_bool[..., 0])  # [num__true,4]
        # relation_input=tf.concat([pred_box,feature],axis=-1)  # [num__true,14]
        #
        # raw_2 = slim.fully_connected(feature,)
        
        # class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 7:], from_logits=True)

        class_loss=object_mask[...,0] * tf.nn.softmax_cross_entropy_with_logits_v2(labels=true_class_probs, logits=raw_pred[..., 7:])
        class_output = tf.argmax(raw_pred[..., 7:], axis=-1)
        class_label = tf.argmax(y_true[l][..., 8:], axis=-1)
        
        
        correct_prediction = tf.equal(class_label, class_output)
        correct_prediction=tf.boolean_mask(correct_prediction, object_mask_bool[..., 0])
        # Ideal value is 1. NOTE(review): overwritten each iteration, so the
        # returned accuracy covers only the last scale.
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        
        # sum rather than mean over positions: samples carry different amounts of
        # information, and samples with more objects should contribute more.
        xy_loss = K.sum(xy_loss) / mf # /mf: average over the batch
        wh_loss = K.sum(wh_loss) / mf
        seed_loss = K.sum(seed_loss) / mf # /mf: average over the batch
        # seed_loss=tf.Print(seed_loss,[seed_loss],"seed_loss is ")

        class_loss = K.sum(class_loss) / mf # /mf: average over the batch
        confidence_loss = K.sum(confidence_loss) / mf
        
        xy_loss_total.append(xy_loss)
        wh_loss_total.append(wh_loss)
        conf_loss_total.append(confidence_loss)
        class_loss_total.append(class_loss)
        seed_loss_total.append(seed_loss)
        
        loss += xy_loss + wh_loss + confidence_loss + class_loss+ seed_loss
        if print_loss:
            loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, K.sum(ignore_mask)],
                            message='loss: ')
        
    def collect_scale(b, pred_box_batch,mask_batch):
        # Gather sample b's true-box predictions from every scale: [num_true_sample,5]
        sample_scale=[x.read(b) for x in scale_pred_box_list]
        sample_pred_box=tf.concat(sample_scale,axis=0)
        num_true_sample=tf.shape(sample_pred_box)[0]
        # Validity mask: ones on the top-left num_true x num_true square,
        # zero-padded out to MAX_BOX_NUM.
        mask_kernel=tf.ones([num_true_sample,num_true_sample])
        mask_paddings=[[0, MAX_BOX_NUM - num_true_sample], [0, MAX_BOX_NUM - num_true_sample]]
        mask = tf.pad(mask_kernel, mask_paddings, 'CONSTANT', constant_values=0)
        mask=tf.cast(mask,tf.bool)
        mask_batch=mask_batch.write(b,mask)

        # Sort ascending by the order value (top_k gives descending; [::-1] reverses).
        _, index = tf.nn.top_k(sample_pred_box[:, 4], k=num_true_sample, sorted=True)
        #  [num_true_sample,4]
        sample_pred_box = tf.gather(sample_pred_box[:,:4], index[::-1], axis=0)
        paddings = [[0, MAX_BOX_NUM - num_true_sample], [0, 0]]
        #  [16,4] — zero-pad to the fixed MAX_BOX_NUM rows
        sample_pred_box = tf.pad(sample_pred_box, paddings, 'CONSTANT', constant_values=0)
        # [16,4]
        pred_box_batch=pred_box_batch.write(b,sample_pred_box)
        return b+1,pred_box_batch,mask_batch

    pred_box_batch = tf.TensorArray(tf.float32, size=1, dynamic_size=True)
    mask_batch = tf.TensorArray(tf.bool, size=1, dynamic_size=True)
    c = lambda i,*args: tf.less(i, m)
    _,pred_box_batch,mask_batch = tf.while_loop(c, collect_scale, [0,pred_box_batch,mask_batch])
    output = pred_box_batch.stack()  # stack the per-sample arrays into one tensor  #[m,16,4]
    mask = mask_batch.stack()  # [m,16,16]
    
    # Auxiliary network predicting the pairwise ordering of the sorted boxes.
    order_matrix=order_matrix_net(output, reuse=False, is_training=is_training) #[m,16,16]
    

    order_loss,wrong_order=get_order_loss(order_matrix,mask)
    loss=loss+order_loss
    # total_loss: [xy, wh, conf, class, seed, order], each summed over scales.
    total_loss=[0]*6
    for i in range(scale_num):
        total_loss[0]+=xy_loss_total[i]
        total_loss[1]+=wh_loss_total[i]
        total_loss[2]+=conf_loss_total[i]
        total_loss[3]+=class_loss_total[i]
        total_loss[4]+=seed_loss_total[i]
    total_loss[5]=order_loss
    
    
    
    return loss,wrong_order,accuracy,total_loss,log_data



def get_order_loss(input,mask):
    '''
    Pairwise ordering loss for the predicted order matrix.

    :param input: [m,16,16] raw logits from order_matrix_net
    :param mask: [m,16,16] bool, True on each sample's valid
        num_true_box x num_true_box top-left square
    :return: (order_loss, wrong_num) — sigmoid cross-entropy summed over valid
        pairs and averaged over the batch, and the average count of pairs whose
        sigmoid output lands on the wrong side of 0.5
    '''
    batch_size = tf.cast(tf.shape(input)[0], tf.float32)
    # Target is a lower-triangular all-ones matrix (diagonal included):
    # label[i,j] = 1 iff i >= j, i.e. row box comes after column box.
    target_full = tf.linalg.LinearOperatorLowerTriangular(tf.ones_like(input)).to_dense()  # [m,16,16]
    # Restrict both labels and logits to the valid entries of each sample.
    target = tf.boolean_mask(target_full, mask)  # [sum_b(num_true_b^2)]
    logits = tf.boolean_mask(input, mask)        # [sum_b(num_true_b^2)]
    xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=logits)
    order_loss = tf.div(tf.reduce_sum(xent), batch_size)
    probs = tf.sigmoid(logits)
    # A pair is "wrong" when |label - sigmoid(logit)| > 0.5, i.e. the
    # prediction sits on the wrong side of the decision boundary.
    wrong_num = tf.div(tf.reduce_sum(tf.cast(tf.abs(target - probs) > 0.5, tf.float32)), batch_size)
    return order_loss, wrong_num

def box_IoU(b1, b2):
    """
    Compute the IoU between every predicted box and every true box.

    :param b1: predicted boxes, shape (13, 13, 3, 4), 4: x,y,w,h
        (centre + size, relative to the full image)
    :param b2: true boxes, shape (T, 4), same xywh convention
    :return: (IoU, IoU_shape) — IoU has shape (13, 13, 3, T);
        IoU_shape is tf.shape(IoU), returned for debugging
    """
    with tf.name_scope('BB1'):
        # Insert a broadcast axis so each anchor slot pairs with every true box.
        pred = tf.expand_dims(b1, -2)            # (13, 13, 3, 1, 4)
        pred_center = pred[..., :2]              # (13, 13, 3, 1, 2)
        pred_half = pred[..., 2:4] / 2.          # half width/height
        pred_min = pred_center - pred_half       # lower corner
        pred_max = pred_center + pred_half       # upper corner
        pred_area = pred[..., 2] * pred[..., 3]  # w * h, (13, 13, 3, 1)
    with tf.name_scope('BB2'):
        # Leading broadcast axis against the prediction grid.
        truth = tf.expand_dims(b2, 0)            # (1, T, 4)
        truth_center = truth[..., :2]
        truth_half = truth[..., 2:4] / 2.
        truth_min = truth_center - truth_half    # lower corner, (T, 2)
        truth_max = truth_center + truth_half    # upper corner
        truth_area = truth[..., 2] * truth[..., 3]  # (T,)

    with tf.name_scope('Intersection'):
        # Overlap rectangle, clamped at zero when the boxes are disjoint.
        inter_min = K.maximum(pred_min, truth_min)
        inter_max = K.minimum(pred_max, truth_max)
        inter_wh = K.maximum(inter_max - inter_min, 0.)
        inter_area = inter_wh[..., 0] * inter_wh[..., 1]  # (13, 13, 3, T)

    # IoU = intersection / union; T is the number of true boxes.
    IoU = tf.divide(inter_area, (pred_area + truth_area - inter_area), name='divise-IoU')
    return IoU, tf.shape(IoU)



