import torch
from torch import nn


def t_l1loss():
    """Demonstrate L1 (mean absolute error) loss with a backward pass.

    Returns:
        The scalar loss tensor, so callers can inspect the MAE value.
    """
    pred = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    # Target must be floating point: with an integer (long) target,
    # L1Loss's backward raises "Found dtype Long but expected Float".
    # This also matches t_mse, which uses a float32 target.
    target = torch.tensor([2.0, 5.0, 8.0])
    loss_fn = nn.L1Loss()  # default reduction='mean'
    loss = loss_fn(pred, target)  # mean absolute error
    loss.backward()  # populate pred.grad
    print(f"MAE: {loss}")
    return loss


def t_mse():
    """Demonstrate MSE loss with a backward pass on a tiny example."""
    prediction = torch.tensor([1.0, 2.0, 3.0], requires_grad=True, dtype=torch.float32)
    expected = torch.tensor([2, 5, 8], dtype=torch.float32)
    criterion = nn.MSELoss()  # reduction defaults to 'mean'
    loss = criterion(prediction, expected)  # mean squared error
    loss.backward()  # back-propagate to populate prediction.grad
    print(f"MSE: {loss}")


def t_cross_entropy():
    """Demonstrate CrossEntropyLoss on a single 3-class sample."""
    # Unnormalized class scores (logits) for one sample, gradients enabled;
    # shaped (1, 3): batch_size=1, num_classes=3.
    logits = torch.tensor([0.1, 0.2, 0.9], requires_grad=True).reshape(1, 3)

    # The correct class index is 1 (PyTorch class labels are 0-based).
    label = torch.tensor([1])

    # CrossEntropyLoss expects raw logits and target class indices.
    criterion = nn.CrossEntropyLoss()
    result_cross = criterion(logits, label)

    print(f"result_cross:{result_cross}")


if __name__ == '__main__':
    # Run each loss-function demo in order.
    for demo in (t_l1loss, t_mse, t_cross_entropy):
        demo()
