

import numpy as np

# One-hot target: the true class is index 0.
y = np.array([1, 0, 0])
# Raw class scores (logits) for the 3 classes.
z = np.array([0.2, 0.1, -0.1])

# Softmax, with the max logit subtracted first for numerical stability:
# the shift cancels in the normalization, so the probabilities are
# unchanged, but exp() can no longer overflow for large logits.
exp_z = np.exp(z - z.max())
y_pred = exp_z / exp_z.sum()
print(y, y_pred, np.log(y_pred))

# Cross-entropy loss: -sum(y * log(y_pred)). With a one-hot target this
# reduces to -log(probability assigned to the true class).
loss = (-y * np.log(y_pred)).sum()

print(loss)


# Same computation using torch.nn.CrossEntropyLoss, which fuses
# log-softmax and negative log-likelihood into a single call.
import torch

# The target is the class *index* (not one-hot). The outer [] adds a
# batch dimension: CrossEntropyLoss expects logits of shape
# (batch, num_classes) and targets of shape (batch,), so this is a
# batch containing one sample.
target = torch.LongTensor([0])
logits = torch.Tensor([[0.2, 0.1, -0.1]])

criterion = torch.nn.CrossEntropyLoss()
loss = criterion(logits, target)
print(loss)

