# -*- encoding: utf-8 -*-
'''
@File    :   losses.py
@Time    :   2021/11/22 9:20
@Author  :   ZhangChaoYang
@Desc    :   Loss functions: MSE, sigmoid cross-entropy, KL divergence, Gaussian NLL, MMD.
'''

import tensorflow as tf
import numpy as np
import keras.losses
from tensorflow_probability.python.distributions.normal import Normal


def square_loss(x, x_hat):
    """Mean squared error, averaged over the last two dimensions.

    Returns a tensor with the two trailing axes reduced away; leading
    (batch) axes are preserved.
    """
    squared_diff = tf.square(x - x_hat)
    row_means = tf.reduce_mean(squared_diff, axis=-1)
    return tf.reduce_mean(row_means, axis=-1)


def log_loss(x, x_hat):
    """Element-wise sigmoid cross-entropy (binary log loss).

    Args:
        x: ground-truth labels in [0, 1].
        x_hat: raw (pre-sigmoid) logits.

    Returns:
        Per-element loss tensor, same shape as the inputs.

    Note (from original author): minimum loss for a positive sample is
    0.31326169; for a negative sample it is 0.69314718.
    """
    # Use named arguments: the positional order of this TF API has been a
    # source of silent label/logit swaps, and TF 1.x rejected positional calls
    # outright ("Only call `sigmoid_cross_entropy_with_logits` with named
    # arguments").
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=x_hat)


def kl_loss(p, q):
    """Per-sample KL divergence KL(p || q), with no batch reduction.

    Equivalent manual formulations (per original author):
        tf.reduce_sum(p * tf.math.log(p / q), axis=1)
            # grows with vector length — longer vectors give larger KL
        tf.reduce_mean(p * tf.math.log(p / q), axis=1)
            # use this form if the result should be length-independent
    """
    divergence = keras.losses.KLDivergence(
        reduction=tf.keras.losses.Reduction.NONE)
    return divergence(p, q)


def log_gaussian_loss(mu, sigma, y_true):
    """Gaussian negative log-likelihood of y_true under N(mu, sigma).

    The loss is -log p(y_true) for the normal distribution parameterized by
    (mu, sigma), i.e. the negated log-likelihood. Because a density can
    exceed 1, this loss can be negative.
    """
    dist = Normal(loc=mu, scale=sigma)
    return -dist.log_prob(y_true)


def maximize_mean_discrepancy(source, target, gamma=1.0):
    """Squared maximum mean discrepancy (MMD) between two sample batches.

    Uses an RBF (Gaussian) kernel k(a, b) = exp(-gamma * ||a - b||^2) and the
    standard biased estimator:

        MMD^2 = E[k(s, s')] + E[k(t, t')] - 2 * E[k(s, t)]

    Args:
        source: (n, d) batch of samples from the source domain.
        target: (m, d) batch of samples from the target domain.
        gamma: RBF kernel bandwidth parameter (default 1.0; the previous
            implementation was an unimplemented stub returning 0).

    Returns:
        Scalar tensor; >= 0 up to numerical error, 0 when the distributions
        coincide.
    """
    def _rbf(a, b):
        # Pairwise squared Euclidean distances via broadcasting: (n, m).
        sq_dist = tf.reduce_sum(
            tf.square(tf.expand_dims(a, 1) - tf.expand_dims(b, 0)), axis=-1)
        return tf.exp(-gamma * sq_dist)

    return (tf.reduce_mean(_rbf(source, source))
            + tf.reduce_mean(_rbf(target, target))
            - 2.0 * tf.reduce_mean(_rbf(source, target)))


def _test_square_loss():
    """Smoke test: compare square_loss against built-in MSE formulations."""
    truth = np.asarray([[[1, 2], [3, 4]], [[1, 2], [3, 4]]])
    pred = np.asarray([[[5, 6], [7, 8]], [[5, 6], [7, 8]]])
    print(square_loss(truth, pred))

    # The following three expressions are equivalent.
    # With no axis given, tf.reduce_mean averages over every dimension.
    print(tf.reduce_mean(tf.square(truth - pred)))
    print(tf.reduce_mean(keras.losses.mean_squared_error(truth, pred)))
    # MeanSquaredError also averages over all dimensions.
    print(keras.losses.MeanSquaredError()(truth, pred))


def _test_log_loss():
    """Smoke test: print element-wise sigmoid cross-entropy for a toy batch."""
    logits = np.asarray([[0.1, 0.0, 0.3], [0.4, 1.0, 0.6]])
    labels = np.asarray([[1., 0., 1.], [0., 1., 0.]])
    print(log_loss(labels, logits))


def _test_kl_loss():
    """Smoke test: print per-row KL divergence for a toy batch."""
    dist_p = np.asarray([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
    dist_q = np.asarray([[0.5, 0.4, 0.8], [0.4, 0.7, 0.3]])
    print(kl_loss(dist_p, dist_q))


if __name__ == '__main__':
    # Manual smoke tests — uncomment the call(s) you want to run.
    pass
    # _test_square_loss()
    # _test_log_loss()
    # _test_kl_loss()
