import torch
import torchvision
from tqdm import tqdm
import matplotlib.pyplot


# By: Elwin https://editor.csdn.net/md?not_checkout=1&articleId=112980305
# 官方quickstart_tutorial: https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html
# 由MIMIC收集整理

# 构建一个神经网络Net,继承于nn.module的类
# Build a small CNN, subclassing torch.nn.Module.
class Net(torch.nn.Module):
    """Small CNN classifier for 28x28 single-channel MNIST images (10 classes).

    The forward pass returns raw logits. The original version appended
    ``torch.nn.Softmax(dim=1)``, but this script trains with
    ``torch.nn.CrossEntropyLoss``, which applies ``log_softmax`` internally;
    feeding it softmax probabilities double-normalizes and squashes the
    gradients, slowing training. Dropping the Softmax fixes that, and
    ``argmax`` predictions are unchanged because softmax is monotonic.
    """

    def __init__(self):  # define the layers of the network in the __init__ function
        super(Net, self).__init__()
        self.model = torch.nn.Sequential(
            # Input size: (N, 1, 28, 28)
            torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),

            # After pooling: (N, 16, 14, 14)
            torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2),

            # After pooling: (N, 32, 7, 7)
            torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),

            torch.nn.Flatten(),
            torch.nn.Linear(in_features=7 * 7 * 64, out_features=128),
            torch.nn.ReLU(),
            # Final layer emits logits; CrossEntropyLoss normalizes internally.
            torch.nn.Linear(in_features=128, out_features=10),
        )

    # specify how data will pass through the network in the forward function
    def forward(self, input):
        """Run a batch through the network.

        Args:
            input: float tensor of shape (N, 1, 28, 28).
        Returns:
            Logits tensor of shape (N, 10).
        """
        output = self.model(input)
        return output


# Pick the training device: the first CUDA GPU when available, else the CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Preprocessing pipeline: PIL image -> float tensor, then normalize the
# single channel to roughly the [-1, 1] range.
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.5], std=[0.5]),
])

BATCH_SIZE = 256  # samples per mini-batch
EPOCHS = 10       # full passes over the training set

# MNIST splits (the training split is downloaded on first run).
trainData = torchvision.datasets.MNIST('./data/', train=True, transform=transform, download=True)
testData = torchvision.datasets.MNIST('./data/', train=False, transform=transform)

# Loaders: reshuffle training data every epoch; keep the test order fixed.
trainDataLoader = torch.utils.data.DataLoader(dataset=trainData, batch_size=BATCH_SIZE, shuffle=True)
testDataLoader = torch.utils.data.DataLoader(dataset=testData, batch_size=BATCH_SIZE)

net = Net()  # instantiate the CNN defined above
print("移动后的模型表示：")
print(net.to(device))  # move the parameters onto `device` and echo the architecture

# To train a model we need a loss function and an optimizer:
# cross-entropy for classification, Adam with its default hyperparameters.
lossF = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters())

# Training loop: optimize on the training set, then evaluate on the full test
# set at the end of every epoch; per-epoch test metrics are kept in `history`.
history = {'Test Loss': [], 'Test Accuracy': []}
for epoch in range(1, EPOCHS + 1):
    processBar = tqdm(trainDataLoader, unit='step')  # progress bar over training batches
    net.train(True)  # (re-)enable training mode at the start of each epoch
    for step, (trainImgs, labels) in enumerate(processBar):
        trainImgs = trainImgs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()  # clear gradients left over from the previous step
        outputs = net(trainImgs)    # forward pass on the training batch
        loss = lossF(outputs, labels)   # classification loss
        predictions = torch.argmax(outputs, dim=1)
        accuracy = torch.sum(predictions == labels) / labels.shape[0]
        loss.backward()     # backpropagation

        optimizer.step()
        processBar.set_description("[%d/%d] Loss: %.4f, Acc: %.4f" %
                                   (epoch, EPOCHS, loss.item(), accuracy.item()))

        if step == len(processBar) - 1:  # after the last training batch, evaluate
            correct, totalLoss = 0, 0.0
            net.train(False)    # switch to evaluation mode
            with torch.no_grad():   # gradients are not needed for evaluation
                for testImgs, labels in testDataLoader:
                    testImgs = testImgs.to(device)
                    labels = labels.to(device)
                    outputs = net(testImgs)
                    loss = lossF(outputs, labels)
                    predictions = torch.argmax(outputs, dim=1)

                    # Accumulate plain Python numbers rather than tensors.
                    totalLoss += loss.item()
                    correct += torch.sum(predictions == labels).item()

                # BUG FIX: divide by the true number of test samples.
                # The original used BATCH_SIZE * len(testDataLoader), which
                # counts the last (partial) batch as full — 10240 instead of
                # 10000 for MNIST — and so understates accuracy by ~2.3%.
                testAccuracy = correct / len(testDataLoader.dataset)
                testLoss = totalLoss / len(testDataLoader)
                history['Test Loss'].append(testLoss)
                history['Test Accuracy'].append(testAccuracy)

                processBar.set_description("[%d/%d] Loss: %.4f, Acc: %.4f, Test Loss: %.4f, Test Acc: %.4f" %
                                           (epoch, EPOCHS, loss.item(), accuracy.item(), testLoss,
                                            testAccuracy))
processBar.close()

plt = matplotlib.pyplot  # short local alias; `import matplotlib.pyplot` is at the top of the file

# Per-epoch test-loss curve.
plt.plot(history['Test Loss'], label='Test Loss')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

# Per-epoch test-accuracy curve.
plt.plot(history['Test Accuracy'], color='red', label='Test Accuracy')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()

# torch.save(net, './model.pth')
'''
--------<*关于模型的保存与调用*>--------
# 保存整个网络
torch.save(net, PATH) 
# 保存网络中的参数, 速度快，占空间少
torch.save(net.state_dict(),PATH)
#--------------------------------------------------
# For the two save methods above, the corresponding load methods are:
model = torch.load(PATH)                  # loads and returns the whole model object
model.load_state_dict(torch.load(PATH))   # updates `model` in place; the return value is NOT the model
'''