# Convert a zero-based class index into a one-hot encoding (y_true)
import torch
# def zeroToOneHot(n,zero_index):
#    E = torch.zeros(n)
#    oneHot = E.scatter_(1, torch.tensor([zero_index]), 1)
#    return oneHot

def zeroToOneHot(n, zero_index):
    """Return the length-n one-hot float vector for class index `zero_index`."""
    one_hot = torch.zeros(n)
    one_hot[zero_index] = 1.0
    return one_hot

# A softmax attached after the backbone's output
def softmax(o):
    return torch.exp(o) / torch.exp(o).sum()
# Cross-entropy: measures the discrepancy between y_pred and y_true

def ce(y_true, y_pred):
    return -(y_true * torch.log(y_pred)).sum()

# Group A: cross-entropy computed with the hand-rolled softmax + ce above.

# Suppose the sample's true label is class 0.
y_true = zeroToOneHot(2, 0)
# Suppose the network's raw output (logits) is (20.1, 20).
logits = torch.tensor([20.1, 20]).float()
y_predict = softmax(logits)
print(ce(y_true, y_predict))

# B组：
import torch
from torch import nn
from torch.nn import functional as F
y_true = torch.tensor([0])
y_predict = torch.tensor([20.1,20]).float().view(1,-1)
ce = nn.CrossEntropyLoss()
print(ce(y_predict,y_true))

if __name__ == '__main__':
    # 4-space indent for consistency with the rest of the file (was 3 spaces).
    print(zeroToOneHot(2, 0))