import torch
import torchvision.datasets
from torch import nn
# from torch.utils.tensorboard import SummaryWriter

# MNIST train/test splits; ToTensor converts PIL images to float tensors in [0, 1].
to_tensor = torchvision.transforms.ToTensor()
mnist = torchvision.datasets.MNIST('../data/', train=True, download=True, transform=to_tensor)
mnist_test = torchvision.datasets.MNIST('../data/', train=False, download=True, transform=to_tensor)

# NOTE(review): batch size of 28 looks copied from the image side length — confirm intent.
dataloader = torch.utils.data.DataLoader(mnist, batch_size=28)
dataloader_test = torch.utils.data.DataLoader(mnist_test, batch_size=28)

print(len(mnist))


# [1*28*28, label]
# for k, v in mnist:
#     print(k, v)

class Net(nn.Module):
    """Small CNN classifier for 1x28x28 MNIST images, producing 10 class logits.

    Each conv uses kernel 3, stride 1, padding 2, so the spatial size grows by 2
    before each 2x2 max-pool: 28 -> 30 -> 15 -> 17 -> 8 -> 10 -> 5, which gives
    56 * 5 * 5 = 1400 flattened features feeding the classifier head.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Fix: the original Sequential had no activation functions, so the
        # stacked convolutions and the two back-to-back Linear layers collapsed
        # to an (almost) linear map — only MaxPool added any nonlinearity.
        # ReLU after each conv and between the linears restores expressiveness;
        # input/output shapes are unchanged.
        self.seq = nn.Sequential(
            nn.Conv2d(1, 28, 3, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(28, 28, 3, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(28, 56, 3, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1400, 28),
            nn.ReLU(),
            nn.Linear(28, 10)
        )

    def forward(self, x):
        """Return raw logits of shape (batch, 10) for input of shape (batch, 1, 28, 28)."""
        return self.seq(x)


if __name__ == '__main__':
    # Train on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net = Net().to(device)
    loss_fn = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.005, momentum=0.9, weight_decay=5e-4)

    total_train_step = 0
    epoch = 5

    for i in range(epoch):
        # --- training phase ---
        net.train()  # fix: enable train-mode behavior (was missing)
        for x, label in dataloader:
            x = x.to(device)
            label = label.to(device)
            # `output` and `loss` are already on `device`; the original's extra
            # .to(device) calls on them were redundant no-ops and are removed.
            output = net(x)
            loss = loss_fn(output, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if total_train_step % 100 == 0:
                print('epoch:[{:2d}], step:{:5d}, loss:{:.5f}'.format(i + 1, total_train_step, loss.item()))
                # writer.add_scalar('mnist_001', loss.item(), total_train_step)
            total_train_step += 1

        # --- evaluation phase ---
        net.eval()  # fix: disable train-mode behavior during evaluation (was missing)
        total_correct = 0
        total_loss = 0.0
        with torch.no_grad():
            for test, label in dataloader_test:
                test = test.to(device)
                label = label.to(device)
                output = net(test)
                loss = loss_fn(output, label)
                # .item() detaches to a plain Python number instead of
                # accumulating a device tensor across the whole loop.
                total_correct += (output.argmax(1) == label).sum().item()
                total_loss += loss.item()
        # Accuracy is per sample. CrossEntropyLoss returns the per-batch *mean*,
        # so the summed losses must be averaged over the number of batches —
        # the original divided by len(mnist_test) (the sample count), which
        # understated the average loss by roughly the batch size.
        print('total accuracy={:.5f}'.format(total_correct / len(mnist_test)))
        print('total loss={:.5f}'.format(total_loss / len(dataloader_test)))

        # NOTE(review): this pickles the entire module; saving net.state_dict()
        # is more robust if only the weights need to be restored.
        torch.save(net, 'mnist_00{}.pth'.format(i))

    # writer.close()
