import torch
import torchvision.datasets as dataset
import torchvision.transforms as transforms
import torch.utils.data as datautils

from cnn import CNN

# data: MNIST train/test splits; ToTensor() maps PIL images to
# float tensors in [0, 1] (ToTensor is stateless, so one shared
# instance serves both splits)
_to_tensor = transforms.ToTensor()

train_data = dataset.MNIST("mnist", train=True, transform=_to_tensor, download=True)
test_data = dataset.MNIST("mnist", train=False, transform=_to_tensor, download=True)

# batch size
n = 64

# Shuffle the training set so each epoch sees batches in a new order.
train_loader = datautils.DataLoader(
    dataset=train_data,
    batch_size=n,
    shuffle=True
)
# Evaluation metrics are order-independent: shuffle=False keeps the
# test pass deterministic and skips the needless permutation work.
test_loader = datautils.DataLoader(
    dataset=test_data,
    batch_size=n,
    shuffle=False
)

# net
# class CNN(torch.nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.conv = torch.nn.Sequential(
#             torch.nn.Conv2d(1, 32, kernel_size=5, padding=2),
#             torch.nn.BatchNorm2d(32),
#             torch.nn.ReLU(),
#             torch.nn.MaxPool2d(2)
#         )
#         self.fc = torch.nn.Linear(14 * 14 * 32, 10)
#
#     def forward(self, x):
#         out = self.conv(x)
#         out = out.view(out.size()[0], -1)
#         out = self.fc(out)
#         return out


# model — .cuda() returns the module itself, so construction and the
# move to the GPU chain into one statement (the script assumes CUDA
# is available)
cnn = CNN().cuda()

# cross-entropy objective (expects raw logits from the net)
loss_func = torch.nn.CrossEntropyLoss()

# Adam over every trainable parameter of the net
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.01)

# training: one epoch = one full pass over the training set
for epoch in range(5):
    # each iteration loads one mini-batch of n (=64) images
    for i, (images, labels) in enumerate(train_loader):
        if epoch == 0 and i == 0:
            # One-time sanity check instead of printing every batch
            # (the per-batch prints were debug leftovers that flood the
            # log and slow the loop). Expected:
            #   images: torch.Size([64, 1, 28, 28]), labels: torch.Size([64])
            print("images shape: ", images.shape)
            print("labels shape: ", labels.shape)
        images = images.cuda()
        labels = labels.cuda()

        # Call the module, not .forward(), so Module.__call__ runs and
        # any registered hooks fire.
        outputs = cnn(images)
        loss = loss_func(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # End-of-epoch summary: last-batch loss. len(train_loader) is the
    # true batch count (ceil division); len(train_data) // n floors it
    # and under-counts when the last batch is partial.
    print("epoch is {}, "
          "total iter in this epoch is {}, "
          "loss is {}".format(epoch + 1, len(train_loader), loss.item()))

#     # eval/test
#     loss_test = 0
#     accuracy = 0
#     for i, (images, labels) in enumerate(test_loader):
#         images = images.cuda()
#         labels = labels.cuda()
#
#         outputs = cnn(images)
#         # [batchsize]
#         # outputs = batchsize * 分类数
#         print("eval outputs.shape: ", outputs.shape)  # torch.Size([64, 10])
#         print("outputs: ", outputs)
#
#         loss_test += loss_func(outputs, labels)
#         _, pred = outputs.max(1)  # argmax over the class dimension turns the 10 logits into a predicted digit 0-9
#         print("pred: ", pred)
#         accuracy += (pred == labels).sum().item()
#
#     accuracy = accuracy / len(test_data)
#     loss_test = loss_test / (len(test_data) // n)
#
#     print("epoch is {}, accuracy is {}, "
#           "loss test is {}".format(epoch + 1,
#                                    accuracy,
#                                    loss_test.item()))
#
# # save
# torch.save(cnn, "model/mnist_cls.pkl")
