# import torch
# import torch.nn as nn
# import numpy as np

# # 定义样本数量和类别数量
# num_samples = 4
# num_classes = 3

# # 模拟模型输出（logits）
# logits = torch.randn(num_samples, num_classes, requires_grad=True)
# # 模拟真实标签
# targets = torch.randint(0, num_classes, (num_samples,))

# # 使用 PyTorch 的 nn.CrossEntropyLoss() 计算损失
# criterion = nn.CrossEntropyLoss()
# pytorch_loss = criterion(logits, targets)
# print(f"PyTorch 计算的交叉熵损失: {pytorch_loss.item()}")

# # 将 PyTorch 张量转换为 NumPy 数组
# logits_np = logits.detach().numpy()
# targets_np = targets.numpy()

# # 手动实现 Softmax 函数
# def softmax(x):
#     exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
#     return exp_x / np.sum(exp_x, axis=1, keepdims=True)

# # 手动实现交叉熵损失函数
# def cross_entropy_loss_numpy(logits, targets):
#     probabilities = softmax(logits)
#     num_samples = logits.shape[0]
#     # 选择真实标签对应的概率
#     correct_log_probs = -np.log(probabilities[range(num_samples), targets])
#     return np.mean(correct_log_probs)

# numpy_loss = cross_entropy_loss_numpy(logits_np, targets_np)
# print(f"NumPy 手动计算的交叉熵损失: {numpy_loss}")
    
    
    
import torch
import torch.nn as nn
import numpy as np

# 定义样本数量和类别数量
num_samples = 5
num_classes = 4

# 模拟模型输出（logits）
logits = torch.randn(num_samples, num_classes, requires_grad=True)

# 错误的 targets 示例
wrong_targets = torch.empty(5, 4).random_(10).float()
try:
    criterion = nn.CrossEntropyLoss()
    wrong_loss = criterion(logits, wrong_targets)
except RuntimeError as e:
    print(f"使用错误的 targets 计算损失时出错: {e}")

# 正确的 targets 示例
correct_targets = torch.randint(0, num_classes, (num_samples,))
correct_loss = criterion(logits, correct_targets)
print(f"使用正确的 targets 时 PyTorch 计算的交叉熵损失: {correct_loss.item()}")

# 将 PyTorch 张量转换为 NumPy 数组
logits_np = logits.detach().numpy()
correct_targets_np = correct_targets.numpy()

# Manual softmax implementation.
def softmax(x):
    """Row-wise softmax of a 2-D array (samples on axis 0).

    Subtracts each row's max before exponentiating — the standard trick
    to avoid overflow; it does not change the result.
    """
    shifted = x - x.max(axis=1, keepdims=True)
    numer = np.exp(shifted)
    denom = numer.sum(axis=1, keepdims=True)
    return numer / denom

# Manual cross-entropy loss implementation.
def cross_entropy_loss_numpy(logits, targets):
    """Mean cross-entropy of integer class `targets` given raw `logits`.

    Args:
        logits: 2-D array of shape (num_samples, num_classes).
        targets: 1-D integer array of true class indices.

    Returns:
        Mean negative log-probability of the true classes (float).

    Computes log-softmax directly via the log-sum-exp trick instead of
    log(softmax(x)). This is mathematically identical but avoids
    log(0) -> -inf when a true-class probability underflows to zero
    for extreme logits.
    """
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    log_probs = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    num_samples = logits.shape[0]
    # Select each sample's true-class log-probability.
    return -np.mean(log_probs[np.arange(num_samples), targets])

# Cross-check: the manual NumPy loss should match the PyTorch value printed above.
numpy_loss = cross_entropy_loss_numpy(logits_np, correct_targets_np)
print(f"NumPy 手动计算的交叉熵损失: {numpy_loss}")
    