import numpy as np
import torch
import torchvision

batch_size = 128  # mini-batch size: process data in batches to keep memory bounded

# Both loaders apply the same PIL -> float tensor conversion.
_to_tensor = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])

# shuffle=True randomizes sample order each epoch so similar samples
# don't end up grouped together in a batch.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=_to_tensor),
    batch_size=batch_size,
    shuffle=True,
)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=_to_tensor),
    batch_size=batch_size,
    shuffle=True,
)


class CNN(torch.nn.Module):
    """Single conv-block classifier for 28x28 single-channel images.

    Architecture: Conv(1->32, 5x5, pad 2) -> BatchNorm -> ReLU -> MaxPool(2),
    then a linear layer mapping the flattened 32x14x14 features to 10 logits.
    """

    def __init__(self):
        super().__init__()
        # padding=2 with a 5x5 kernel preserves the 28x28 spatial size;
        # the 2x2 max-pool then halves it to 14x14.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=5, padding=2),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        # 32 feature maps of 14x14, flattened -> 10 class logits.
        self.fc = torch.nn.Linear(14 * 14 * 32, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 1, 28, 28)."""
        features = self.conv(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)


cnn = CNN()

# Cross-entropy over the 10 class logits.
loss_fc = torch.nn.CrossEntropyLoss()

# NOTE(review): lr=0.01 is on the high side for Adam (1e-3 is the usual
# default) — kept as-is to preserve behavior; confirm intent.
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.01)

# One pass over the training data.
for epoch in range(1):
    for images, labels in train_loader:
        logits = cnn(images)
        batch_loss = loss_fc(logits, labels)

        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        print(batch_loss.item())

print("*" * 100)

acc_list = []
loss_list = []
for i, (images, labels) in enumerate(test_loader):
    output = cnn(images)
    loss = loss_fc(output, labels)
    loss_list.append(loss.item())
    _, pred = output.max(1)
    # pred是images的预测值，labels是正确值，查看预测值和正确值是否一致
    acc_list.append((pred == labels).float().mean())
print(np.mean(acc_list))
print(np.mean(loss_list))
