"""
kl_divergence 损失函数
"""
import numpy as np
from keras.losses import KLDivergence, kl_divergence


def get_data():
    """Build a tiny fixture of labels and predictions.

    :return: tuple ``(y_true, y_pred)`` of shape-(2, 2) float arrays.
    """
    true_rows = [
        [0.0, 1.0],
        [0.0, 0.0],
    ]
    pred_rows = [
        [0.6, 0.4],
        [0.4, 0.6],
    ]
    return np.array(true_rows), np.array(pred_rows)


def compute_kld(y_true, y_pred):
    """Compute KL divergence per sample and its mean, mirroring Keras.

    Keras's ``kl_divergence`` clips both tensors to ``[epsilon, 1]``
    (via ``tf.keras.backend.clip``) before evaluating
    ``sum(y_true * log(y_true / y_pred), axis=-1)``; we reproduce that
    with ``np.clip`` so the values match the reference implementation.

    :param y_true: array of target probability distributions,
        last axis holding the class probabilities.
    :param y_pred: array of predicted probability distributions,
        same shape as ``y_true``.
    :return: tuple ``(per_sample, mean)`` — per-sample divergences
        (shape of input minus the last axis) and their scalar mean.
    """
    # Clip into [1e-7, 1] like Keras: the lower bound avoids log(0) -> -inf,
    # the upper bound guards against inputs that stray above 1.
    y_true = np.clip(y_true, 1e-7, 1.0)
    y_pred = np.clip(y_pred, 1e-7, 1.0)

    # axis=-1 (instead of a hard-coded axis=1) supports any batch shape,
    # matching the Keras convention of reducing over the class axis.
    total = (y_true * np.log(y_true / y_pred)).sum(axis=-1)

    return total, total.mean()


def run():
    """Entry point: compare the NumPy KLD against the Keras versions.

    :return: None — results are printed to stdout.
    """
    y_true, y_pred = get_data()

    # Hand-rolled per-sample divergences and their mean.
    per_sample, mean_kld = compute_kld(y_true=y_true, y_pred=y_pred)

    # Keras class-based loss (reduces to a scalar mean).
    keras_loss = KLDivergence()
    keras_class_value = keras_loss(y_true=y_true, y_pred=y_pred)

    # Keras functional form (per-sample values).
    keras_fn_value = kl_divergence(y_true=y_true, y_pred=y_pred)

    info = 'Class: my: {}; kl: {}\nFunction: my: {}, kl: {}'.\
        format(mean_kld, keras_class_value,
               per_sample, keras_fn_value)
    print(info)


if __name__ == '__main__':
    run()
