# Loss function demos: L1 (MAE), MSE, and cross-entropy
import torch
from torch.nn import L1Loss
from torch import nn

# Demo: L1 (mean absolute error) loss vs. MSE loss on a small example.
# NOTE: renamed `input` -> `inputs` so the builtin input() is not shadowed.
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
target = torch.tensor([1, 2, 5], dtype=torch.float32)

# Reshape to (N, C, H, W) = (1, 1, 1, 3) to mimic a typical batched tensor.
inputs = torch.reshape(inputs, (1, 1, 1, 3))
target = torch.reshape(target, (1, 1, 1, 3))

# L1Loss is mean ABSOLUTE error (not mean squared error).
# reduction="sum" returns the sum of |inputs - target| without averaging;
# reduction="mean" (the default) would average over all elements.
loss = L1Loss(reduction="sum")
result = loss(inputs, target)  # |1-1| + |2-2| + |3-5| = 2.0

# MSELoss: squares the element-wise differences, then averages them.
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, target)  # (0 + 0 + 4) / 3 = 1.333...

print(result)
print(result_mse)

# Cross-entropy loss demo.
# CrossEntropyLoss expects logits of shape (N, C) and class indices of
# shape (N,), so the 1-D logits vector is reshaped to a batch of one.
logits = torch.tensor([0.1, 0.2, 0.3])
labels = torch.tensor([1])
loss_cross = nn.CrossEntropyLoss()
logits = torch.reshape(logits, (1, 3))
result_loss = loss_cross(logits, labels)
print(result_loss)