# TODO: Common loss functions
# DATE: 2022/3/19
# AUTHOR: Cheng Ze WUST
import numpy as np
from NumberImgRecognition.dataset.mnist import load_mnist
import tensorflow as tf
import os
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'  # quiet TensorFlow's C++ startup/log messages

y1=[0.1,0.05,0.6,0.0,0.05,0.1,0.0,0.1,0.0,0.0]  # predicted scores: highest at class 2 -- "correct" prediction
y2=[0.1,0.05,0.1,0.0,0.05,0.1,0.0,0.6,0.0,0.0]  # predicted scores: highest at class 7 -- "wrong" prediction
t= [0,0,1,0,0,0,0,0,0,0]    # one-hot ground truth: the true class is 2
# Mean squared error (MSE)
def mean_squared_error(y,t):
    """Return the average of the squared element-wise differences of y and t."""
    squared_diff = (np.asarray(y) - np.asarray(t)) ** 2
    return squared_diff.sum() / len(y)
print(mean_squared_error(y1,t))  # small error: prediction agrees with the label
print(mean_squared_error(y2,t))  # larger error: prediction is wrong

# Cross-entropy error
def cross_entropy_error(y,t):
    """Return -sum(t * log(y)) for a one-hot target vector t.

    A tiny epsilon is added to y so log() never sees an exact zero.
    """
    eps = 1e-7
    y_arr = np.asarray(y)
    t_arr = np.asarray(t)
    return -(t_arr * np.log(y_arr + eps)).sum()
print(cross_entropy_error(y1,t))  # small loss: high probability on the true class
print(cross_entropy_error(y2,t))  # large loss: low probability on the true class


#region mini-batch learning (cross-entropy on a small random batch)
(x_train,t_train),(x_test,t_test)=load_mnist(normalize=True,one_hot_label=True)
print(x_train.shape)    # 60000 images, 784 pixels each
print(t_train.shape)    # 10 classes (one-hot labels)
batch_size=1000
train_size=x_train.shape[0]
# NOTE(review): np.random.choice samples WITH replacement by default,
# so the same index may appear more than once in the batch.
batch_mask=np.random.choice(train_size,batch_size)
print(batch_mask)   # randomly drawn training-data indices
x_batch=x_train[batch_mask]
t_batch=t_train[batch_mask]
print(x_batch.shape)    # the 1000 sampled images

def cross_entropy_loss(y,t):
    """Mini-batch cross-entropy error for one-hot targets.

    Args:
        y: predicted probabilities, shape (classes,) or (batch, classes).
        t: one-hot target(s); broadcast against ``y`` when ``y`` is a batch.

    Returns:
        The cross-entropy averaged over the batch (a float).
    """
    if y.ndim==1:
        # Promote a single sample to a batch of one so the averaging
        # code path below handles both cases uniformly.
        # (Fix: removed leftover debug print(y) statements.)
        t=t.reshape(1,t.size)
        y=y.reshape(1,y.size)
    batch_size=y.shape[0]
    delta = 1e-7  # keeps log() away from zero
    return -np.sum(t * np.log(y + delta))/batch_size

y3=[0.1,0.1,0.6,0.0,0.05,0.1,0.0,0.05,0.0,0.0]
y4=[0.0,0.09,0.8,0.0,0.05,0.0,0.0,0.0,0.0,0.06]
print(cross_entropy_loss(np.array(y1),np.array(t)))
print(cross_entropy_loss(np.array(y2),np.array(t)))
print(cross_entropy_loss(np.array([y1,y2]),np.array(t)))
print(cross_entropy_loss(np.array([y1,y2,y3]),np.array(t)))
print(cross_entropy_loss(np.array([y1,y2,y3,y4]),np.array(t)))  # averaged cross-entropy shrinks as this batch grows

def cross_entropy_loss1(y,t):
    """Mini-batch cross-entropy error when t holds integer class labels.

    Picks each sample's predicted probability for its true class and
    averages -log of those probabilities over the batch.
    """
    if y.ndim==1:
        # promote a single sample to a batch of one
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    n = y.shape[0]
    eps = 1e-7  # keeps log() away from zero
    picked = y[np.arange(n), t]
    return -np.log(picked + eps).sum() / n

print('----------')
# Same losses as above, but with integer class labels instead of one-hot vectors
print(cross_entropy_loss1(np.array(y1),np.array([2])))
print(cross_entropy_loss1(np.array(y2),np.array([2])))
print(cross_entropy_loss1(np.array([y1,y2]),np.array([2,2])))
print(cross_entropy_loss1(np.array([y1,y2,y3]),np.array([2,2,3])))
print(cross_entropy_loss1(np.array([y1,y2,y3,y4]),np.array([2,2,3,4])))
#endregion


#region regression loss functions (tensorflow)
y_array=tf.linspace(-1.,1.,500)  # 500 predictions evenly spaced over [-1, 1]
target=tf.constant(0.)           # regression target value
# Squared (L2) loss
def square_loss(y,t):
    """Element-wise squared error (y - t)^2, returned as a NumPy array."""
    residual = y - t
    return tf.square(residual).numpy()
out1=square_loss(y_array,target)

# Absolute (L1) loss
def abs_loss(y,t):
    """Element-wise absolute error |y - t|, returned as a NumPy array."""
    residual = y - t
    return tf.abs(residual).numpy()
out2=abs_loss(y_array,target)

# Pseudo-Huber loss: a smooth approximation of the Huber loss
def pseudo_huber_loss(y,t,d):
    """d^2 * (sqrt(1 + ((y - t)/d)^2) - 1), element-wise, as a NumPy array.

    Behaves quadratically near the target and linearly far from it;
    d controls where the transition happens.
    """
    delta = tf.constant(d)
    scaled_residual = (y - t) / delta
    curve = tf.sqrt(1. + tf.square(scaled_residual)) - 1.
    return tf.multiply(tf.square(delta), curve).numpy()
out3=pseudo_huber_loss(y_array,target,0.25)
out4=pseudo_huber_loss(y_array,target,5.)

# Plot the four regression losses as the prediction moves away from the target
plt.plot(y_array,out1,'b-',label='Mean-Square Loss')
plt.plot(y_array,out2,'r--',label='Mean-Abs Loss')
plt.plot(y_array,out3,'k-',label='P-Huber Loss(0.25)')
plt.plot(y_array,out4,'g:',label='P-Huber Loss(5.0)')
plt.ylim(-.2,.4)
plt.legend(loc='lower right',prop={'size':11})
plt.grid()
plt.show()
#endregion


#region classification loss functions
y_array=tf.linspace(-3.,5.,500)  # raw model outputs over [-3, 5]
target=tf.constant(1.)           # scalar positive label (NOTE(review): unused below)
targets=tf.fill([500,],1.)       # per-sample labels, all set to the positive class
# NOTE(review): despite its name, this computes the HINGE loss
# max(0, 1 - t*y), not the Huber loss. Name kept for compatibility.
def Huber_loss(y,t):
    """Hinge loss max(0, 1 - t*y), element-wise, as a NumPy array."""
    margin = tf.multiply(t, y)
    return tf.maximum(0., 1. - margin).numpy()
out1=Huber_loss(y_array,targets)
print(out1)

#Cross entropy
# NOTE(review): this redefines (shadows) the NumPy cross_entropy_loss defined
# earlier in the file. It expects y in (0, 1); values outside give NaN here.
def cross_entropy_loss(y,t):
    """Binary cross-entropy -t*log(y) - (1-t)*log(1-y), element-wise."""
    positive_term = tf.multiply(t, tf.math.log(y))
    negative_term = tf.multiply(1. - t, tf.math.log(1. - y))
    return -positive_term - negative_term
out2=cross_entropy_loss(y_array,targets)
print(out2)

#Cross entropy sigmoid loss
def cross_entropy_sigmoid_loss(y,t):
    """Element-wise sigmoid cross-entropy computed from raw logits.

    Fix: the original body ignored both parameters and read the
    module-level globals y_array/targets; it now uses y (logits) and
    t (labels). The call below passes those same globals, so the
    demo's output is unchanged.
    """
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=y,labels=t)
out3=cross_entropy_sigmoid_loss(y_array,targets)
print(out3)

#Weigh entropy loss
def weigh_cross_entropy_loss(y,t,weight):
    """Element-wise weighted sigmoid cross-entropy; the positive-class
    term is scaled by ``weight`` (pos_weight).

    Fix: the original body ignored its parameters and read the
    module-level globals y_array/targets; it now uses y, t, and weight.
    The call below passes those same globals, so output is unchanged.
    """
    return tf.nn.weighted_cross_entropy_with_logits(logits=y, labels=t,pos_weight=weight)
out4=weigh_cross_entropy_loss(y_array,targets,0.5)

# Plot the four classification losses against the raw prediction value.
# NOTE(review): the curve labeled 'Huber loss' is actually the hinge loss
# (see Huber_loss above).
plt.plot(y_array,out1,'b-',label='Huber loss')
plt.plot(y_array,out2,'r--',label='Cross entropy')
plt.plot(y_array,out3,'k-',label='Cross entropy sigmoid')
plt.plot(y_array,out4,'g:',label='Weigh entropy')
plt.ylim(-1.5,3)
plt.legend(loc='lower right',prop={'size':12})
plt.grid()  # show grid lines
plt.show()
#endregion

