#### Cross-entropy error: the closer the network's output is to the supervision (teacher) data, the smaller the loss value (closer to 0).
import numpy as np


def cross_entropy_error(y, t):
    """Return the cross-entropy error between predictions and one-hot labels.

    Generalized to accept either a single sample (1-D arrays) or a
    mini-batch (2-D arrays of shape (batch_size, num_classes)); for a
    batch the mean per-sample loss is returned. A single 1-D sample is
    treated as a batch of size 1, so the original behaviour is unchanged.

    Args:
        y: network output (e.g. softmax probabilities), 1-D or 2-D ndarray.
        t: one-hot supervision labels, same shape as ``y``.

    Returns:
        float: the (mean) cross-entropy loss; smaller means more accurate.
    """
    # Promote a single sample to a batch of one so both cases share one path.
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    # Tiny offset prevents log(0) = -inf when a predicted probability is 0.
    delta = 1e-7
    return -np.sum(t * np.log(y + delta)) / batch_size

if __name__ == '__main__':
    # One-hot supervision label: the correct class is index 2.
    label = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
    # Two candidate network outputs: the first puts most probability on
    # index 2 (the correct class), the second on index 7 (a wrong class).
    candidates = [
        np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]),
        np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0]),
    ]
    # Smaller loss means a better prediction: ~0.5108 for the correct
    # guess versus ~2.3026 for the wrong one.
    for output in candidates:
        print(cross_entropy_error(output, label))
