"""
损失函数
"""
import torch
import torchvision
from torch import nn
from torch.nn import Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Toy example: three predictions vs. three targets (only the last pair differs).
inputs = torch.tensor([1.0, 2.0, 3.0])
targets = torch.tensor([1.0, 2.0, 5.0])
# Reshape to an (N, C, H, W) = (1, 1, 1, 3) layout for the loss modules below.
inputs = inputs.reshape(1, 1, 1, -1)
targets = targets.reshape(1, 1, 1, -1)

# L1Loss with reduction="mean": mean of absolute errors,
#   (|y1-x1| + |y2-x2| + ... + |yn-xn|) / n
mae_mean = nn.L1Loss(reduction="mean")
print(mae_mean(inputs, targets))

# L1Loss with reduction="sum": sum of absolute errors,
#   |y1-x1| + |y2-x2| + ... + |yn-xn|
mae_sum = nn.L1Loss(reduction="sum")
print(mae_sum(inputs, targets))

# MSELoss: mean squared error,
#   ((y1-x1)^2 + (y2-x2)^2 + ... + (yn-xn)^2) / n
# Commonly used as the loss for regression problems.
mse_loss = nn.MSELoss()
print(mse_loss(inputs, targets))

# CrossEntropyLoss: commonly used for classification problems.
# It expects raw, unnormalized scores ("logits") of shape
# (batch_size, num_classes) and class-index targets of shape (batch_size,).

# Logits for one sample over 3 classes, already shaped (batch_size, num_classes).
x = torch.tensor([[0.1, 0.2, 0.3]])
# Index of the true class for that sample.
y = torch.tensor([1])
los_cross = nn.CrossEntropyLoss()
res_cross = los_cross(x, y)
print(res_cross)
