import keras.backend as K
import tensorflow as tf

def getPrecision(y_true, y_pred):
    """Precision metric for binary {0, 1} labels: TP / (TP + FP).

    True positives are counted directly; false positives are derived as
    the number of actual negatives minus the true negatives.
    """
    # TP: both label and prediction are 1.
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    # Actual negatives: entries where y_true == 0 (the subtraction maps 0 -> -1, 1 -> 0).
    neg_total = -K.sum(K.round(K.clip(y_true - K.ones_like(y_true), -1, 0)))
    # TN: both label and prediction are 0 (product of the two shifted tensors is 1 there).
    true_neg = K.sum(
        K.round(
            K.clip(
                (y_true - K.ones_like(y_true)) * (y_pred - K.ones_like(y_pred)),
                0,
                1,
            )
        )
    )
    false_pos = neg_total - true_neg
    # Epsilon guards against division by zero when nothing was predicted positive.
    return true_pos / (true_pos + false_pos + K.epsilon())


def getRecall(y_true, y_pred):
    """Recall metric where class 0 is the positive class.

    Labels: 0 is the positive example; 1 and 2 are negatives. Standard
    recall assumes 1 = positive, so labels and predictions are first
    remapped (0 -> 1, anything else -> 0) before computing TP / (TP + FN).
    """
    # Vectorized remap via tf.equal. The previous tf.map_fn version was
    # broken: a Python `if x == 0` on a tensor raises "truth value of a
    # tensor is ambiguous", map_fn iterates over the first axis (rows for
    # batched input) rather than elements, and the lambda's Python-int
    # output dtype mismatched the input dtype map_fn expects.
    y_true1 = tf.cast(tf.equal(y_true, 0), tf.float32)
    y_pred1 = tf.cast(tf.equal(y_pred, 0), tf.float32)

    TP = K.sum(y_true1 * y_pred1)  # true positives
    P = K.sum(y_true1)  # all actual positives
    FN = P - TP  # false negatives = P - TP
    # Epsilon avoids division by zero when there are no positive examples.
    recall = TP / (TP + FN + K.epsilon())
    return recall


# def getRecall(y_true, y_pred):
#     """
#     Legacy version of getRecall (kept for reference)
#     """
#
#     TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # TP
#     P = K.sum(K.round(K.clip(y_true, 0, 1)))
#     FN = P - TP  # FN=P-TP
#     TP = tf.cast(TP, tf.float32)
#     FN = tf.cast(FN, tf.float32)
#     recall = TP / (TP + FN + K.epsilon())  # TP/(TP+FN)
#     return recall
