import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader


class Model(nn.Module):
    """Small CNN classifier for 32x32 RGB images (e.g. CIFAR-10).

    Architecture: two (conv -> 2x2 max-pool -> ReLU) stages, then a
    flatten and two fully-connected layers producing 10 class logits.
    Spatial size: 32 -> 16 -> 8, so the flattened size is 64*8*8 = 4096.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
        self.max_pool1 = nn.MaxPool2d(kernel_size=2)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.max_pool2 = nn.MaxPool2d(kernel_size=2)
        self.relu2 = nn.ReLU()
        self.flatten = nn.Flatten()
        self.l1 = nn.Linear(4096, 128)
        self.l2 = nn.Linear(128, 10)
        # Kept for backward compatibility with code that references
        # model.softmax, but no longer applied in forward(): this model
        # is trained with nn.CrossEntropyLoss, which expects raw logits
        # (it applies log-softmax internally). Applying Softmax first
        # was a bug that flattens gradients and slows training.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return raw class logits of shape (N, 10) for input (N, 3, 32, 32)."""
        x = self.conv1(x)
        x = self.max_pool1(x)  # fix: was max_pool2 (max_pool1 was never used)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.max_pool2(x)
        x = self.relu2(x)
        x = self.flatten(x)
        x = self.l1(x)
        x = self.l2(x)
        # No softmax here: return logits. argmax over logits gives the same
        # predictions, and CrossEntropyLoss consumes logits directly.
        return x


# CIFAR-10: 50k train / 10k test 32x32 RGB images, 10 classes.
# download=True fetches the archive into ../datasets on first run.
dataset_train = torchvision.datasets.CIFAR10("../datasets", train=True, transform=torchvision.transforms.ToTensor(),
                                             download=True)
dataset_test = torchvision.datasets.CIFAR10("../datasets", train=False, transform=torchvision.transforms.ToTensor(),
                                            download=True)

# Mini-batches of 64; shuffle the training stream only.
dataloader_train = DataLoader(dataset_train, 64, shuffle=True)
dataloader_test = DataLoader(dataset_test, 64, shuffle=False)


model = Model()
# NOTE(review): CrossEntropyLoss expects raw logits and applies log-softmax
# internally, but Model.forward applies nn.Softmax before returning — the
# double softmax weakens gradients. Model should return logits instead.
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.SGD(model.parameters(), lr=0.1)


def data_train(net, loss, optimizer, dataloader, epochs=20):
    """Train *net* in place for *epochs* passes over *dataloader*.

    Args:
        net: the nn.Module to train (put into train mode).
        loss: criterion mapping (outputs, targets) -> scalar loss tensor.
        optimizer: optimizer wrapping net's parameters.
        dataloader: iterable of (images, targets) mini-batches.
        epochs: number of full passes over the data (default 20, matching
            the previous hard-coded value).

    Prints the mean batch loss after each epoch.
    """
    net.train()
    for epoch in range(epochs):
        loss_sum = 0.0
        for images, targets in dataloader:
            # Fix: the body previously used the *globals* `optim` and
            # `model` instead of the `optimizer` and `net` parameters,
            # so calling this with other objects trained the wrong ones.
            optimizer.zero_grad()
            # Call the module, not .forward(), so hooks run.
            outputs = net(images)
            result_loss = loss(outputs, targets)
            result_loss.backward()
            optimizer.step()
            loss_sum += result_loss.item()
        print(f"第{epoch+1}轮训练, Loss:{loss_sum/len(dataloader)}")


data_train(model, loss_fn, optim, dataloader_train)


def data_test(net, loss, dataloader, dataset):
    """Evaluate *net* on *dataloader* and report mean loss and accuracy.

    Args:
        net: the nn.Module to evaluate (put into eval mode).
        loss: criterion mapping (outputs, targets) -> scalar loss tensor.
        dataloader: iterable of (images, targets) mini-batches.
        dataset: the underlying dataset, used only for its length when
            computing the accuracy percentage.

    Returns:
        (avg_loss, accuracy): mean batch loss and accuracy in percent.
        (Returning the metrics is new; callers that ignored the previous
        None return are unaffected.)
    """
    net.eval()
    loss_sum = 0.0
    correct_sum = 0
    # no_grad: inference only — skip autograd bookkeeping.
    with torch.no_grad():
        for images, targets in dataloader:
            # Call the module, not .forward(), so hooks run.
            outputs = net(images)
            loss_sum += loss(outputs, targets).item()
            correct_sum += (torch.argmax(outputs, dim=1) == targets).sum().item()
    avg_loss = loss_sum / len(dataloader)
    accuracy = correct_sum * 100 / len(dataset)
    print(f"Loss:{avg_loss}, Accuracy:{accuracy}%")
    return avg_loss, accuracy


data_test(model, loss_fn, dataloader_test, dataset_test)
