import torch
import torch.nn.functional as F
import torch.nn as nn
# Demo: compute cross-entropy loss three equivalent ways and show they agree.
# Renamed `input` -> `logits` to avoid shadowing the builtin `input`.
logits = torch.randn(2, 3)     # raw scores, shape (batch=2, num_classes=3)
target = torch.tensor([0, 2])  # ground-truth class index per sample

# 1) From the formula by hand.
# Pin num_classes to the logit width: letting one_hot infer it from `target`
# would produce the wrong width whenever the highest class index is absent
# from the batch.
one_hot = F.one_hot(target, num_classes=logits.shape[1]).float()
# Softmax written out explicitly. Subtract the per-row max before exp() so it
# cannot overflow for large logits — mathematically the result is unchanged.
shifted = logits - logits.max(dim=1, keepdim=True).values
p = torch.exp(shifted) / torch.sum(torch.exp(shifted), dim=1).reshape(-1, 1)
loss_com = -torch.sum(one_hot * torch.log(p)) / target.shape[0]

# 2) Via torch.nn.functional: log_softmax + nll_loss (no one-hot encoding
# of the labels is needed here).
log_softmax = F.log_softmax(logits, dim=1)
loss_func = F.nll_loss(log_softmax, target)

# 3) Directly with F.cross_entropy, which fuses the two steps above.
loss_entropy = F.cross_entropy(logits, target)

print(f"input:\n{logits} \n")
print(f"target:\n{target} \n")
print(f"one_hot:\n{one_hot} \n")
print(f"p(softmax(input)):\n{p} \n")
print(f"log_softmax:\n{log_softmax}\n")
print(f"loss_com:{loss_com}; \nloss_func:{loss_func};\nloss_entropy:{loss_entropy}")
