import torch
import torch.nn as nn

# --- Part 1: CrossEntropyLoss on random data ------------------------------
# Generate random targets and model predictions (no seed is set, so these
# values differ on every run — the printed loss is illustrative only).
labels = torch.randint(0, 5, (4,))  # integer class labels in [0, 5), batch of 4
outputs = torch.randn(4, 5)  # random logits for a 5-class problem, shape (4, 5)

# Print the generated labels and model outputs.
print("Labels:", labels)
print("Outputs:", outputs)

# Compute the loss by passing predictions and targets to the loss function.
# Note: CrossEntropyLoss expects raw logits, not probabilities.
criterion = nn.CrossEntropyLoss()
loss = criterion(outputs, labels)
print("Loss:", loss)

# --- Part 2: reproduce cross-entropy by hand, step by step ----------------
# Hand-crafted logits (4 samples, 4 classes) and matching targets.
pre = torch.tensor([[0.8, 0.0, 0.0, 0.0],
                    [0.0, 0.9, 0.0, 0.1],
                    [0.0, 0.0, 0.9, 0.1],
                    [0.0, 0.2, 0.0, 0.8]], dtype=torch.float)
tgt = torch.tensor([0, 1, 2, 3], dtype=torch.long)  # class indices, one per row
# One-hot encoding of tgt, used to select each row's true-class log-probability.
tgt_onehot = torch.tensor([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]], dtype=torch.int)
print("Labels:", tgt)
print("Outputs:", pre)

# Compute each intermediate exactly once instead of re-deriving the whole
# chain inside every print statement.
probs = torch.softmax(pre, dim=-1)  # step 1: softmax over the class axis
# step 2: log of the probabilities (torch.log_softmax(pre, dim=-1) would be
# the numerically stable equivalent; the explicit two-step form is kept here
# because the demo walks through each operation separately)
log_probs = torch.log(probs)
# step 3: one-hot mask keeps only the true class's log-prob; negate it
per_sample = -torch.sum(torch.mul(log_probs, tgt_onehot), dim=-1)
manual_loss = torch.mean(per_sample)  # step 4: average over the batch

print("1.softmax")
print(probs)
print("2.取对数")
print(log_probs)
print("3.与真实值相乘")
print(per_sample)
print("4.取平均")
print(manual_loss)
print()
print("调用损失函数:")
# The hand-computed mean matches nn.CrossEntropyLoss on the same inputs.
print(criterion(pre, tgt))