#
# minst_t02.py
#
# [MNIST数据集的训练及简单应用](https://blog.csdn.net/m0_53396562/article/details/119387564?spm=1001.2101.3001.6650.1&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-119387564-blog-112063808.pc_relevant_recovery_v2&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-119387564-blog-112063808.pc_relevant_recovery_v2&utm_relevant_index=2)
#
###############################################################################
#
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch import Tensor
import mnist

#
# 1. Hyperparameters
#
MNIST_ROOT = "e:/sfxData/DeepLearning"  # root directory where torchvision stores/downloads MNIST

EPOCHS = 2          # total number of training epochs
BATCH_SIZE = 64     # mini-batch size
LR = 0.001          # learning rate (SGD)
WORKERS = 0         # DataLoader worker count — NOTE(review): defined but never passed to the loaders below
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # NOTE(review): never applied (`.to(DEVICE)` calls are commented out), so everything runs on CPU

#
# 2. Prepare the datasets (training and test sets)
#
# Define the transform pipeline: preprocess raw images into normalized tensors so they can be fed to the network.
transform = transforms.Compose([
    transforms.ToTensor(),                      # PIL image -> FloatTensor scaled to [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # standardize with the canonical MNIST mean/std
])

# Training set (downloaded to MNIST_ROOT on first run)
train_dataset = datasets.MNIST(
    root=MNIST_ROOT,
    train=True,
    transform=transform,
    download=True)

# Test set
# test_dataset = mnist.get_dataset(train=False, transform=transform)
test_dataset = datasets.MNIST(
    root=MNIST_ROOT,
    train=False,
    transform=transform,
    download=True)

# Training-set loader (reshuffled every epoch)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

# Test-set loader (fixed order for reproducible evaluation)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False)

# Scratch test: manually trace one batch through the layer stack
def my_test():
    """Manually push one training batch through the CNN layer stack,
    printing the final shape — a scratch check of the tensor dimensions
    used by the CNN class below.

    Fixes vs. the original: `img` and `x` are already tensors, so the
    redundant `Tensor(...)` re-wrapping (which copies the data) was
    removed, along with the dead trailing `pass`.
    """
    img, _ = next(iter(train_loader))  # img: [64, 1, 28, 28], labels unused

    batch_size = img.size(0)

    conv1 = torch.nn.Conv2d(1, 10, 5)
    pooling = torch.nn.MaxPool2d(2)
    x = conv1(img)  # [64, 1, 28, 28] => [64, 10, 24, 24]
    x = pooling(x)  # [64, 10, 24, 24] => [64, 10, 12, 12]
    x = F.relu(x)

    conv2 = torch.nn.Conv2d(10, 20, 5)
    x = conv2(x)    # [64, 10, 12, 12] => [64, 20, 8, 8]
    x = pooling(x)  # [64, 20, 8, 8] => [64, 20, 4, 4]
    x = F.relu(x)

    x = x.view(batch_size, -1)         # flatten: [64, 20, 4, 4] => [64, 320]
    fc1 = torch.nn.Linear(320, 256)    # fully connected layer 1
    fc2 = torch.nn.Linear(256, 128)    # fully connected layer 2
    fc3 = torch.nn.Linear(128, 10)     # fully connected layer 3
    x = fc3(fc2(fc1(x)))               # [64, 320] => [64, 10]
    print(x.shape)

# my_test()


#
# 3. Model definition: CNN
#


class CNN(torch.nn.Module):
    """Small LeNet-style network for 10-class MNIST classification:
    two conv+pool feature stages followed by a three-layer FC head.
    Outputs raw scores (logits) suitable for CrossEntropyLoss.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Feature extractor: two 5x5 convolutions, each followed by 2x2 max pooling.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=(5, 5))
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=(5, 5))
        self.pooling = torch.nn.MaxPool2d(2)
        # Classifier head: 320 flattened features -> 10 class scores.
        self.fc1 = torch.nn.Linear(320, 256)
        self.fc2 = torch.nn.Linear(256, 128)
        self.fc3 = torch.nn.Linear(128, 10)

    def forward(self, x: Tensor):
        """Map a [N, 1, 28, 28] image batch to [N, 10] logits."""
        n = x.size(0)
        # Stage 1: conv -> pool -> ReLU  [N, 1, 28, 28] -> [N, 10, 12, 12]
        x = self.conv1(x)
        x = F.relu(self.pooling(x))
        # Stage 2: conv -> pool -> ReLU  -> [N, 20, 4, 4]
        x = self.conv2(x)
        x = F.relu(self.pooling(x))
        # Flatten, then run the FC head (no activation between FC layers,
        # matching the original design).
        x = x.view(n, -1)
        return self.fc3(self.fc2(self.fc1(x)))


model = CNN()  # instantiate the network

#
# 4. Loss function and optimizer
#
# Cross-entropy loss, suitable for multi-class classification (expects raw logits).
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.5)

# NOTE(review): DEVICE is defined above but the model/criterion are never
# moved to it, so training runs on CPU. Uncomment to enable GPU use.
# model.to(DEVICE)
# criterion.to(DEVICE)

#
# 5. Training loop
#


def train(epoch):
    """Run one training epoch over train_loader and return the mean loss per batch.

    Prints the running average loss every 300 mini-batches.

    Bug fixed: the original returned `running_loss`, which is zeroed at every
    300-batch report, so the caller only received the residual loss of the last
    partial window — making the plotted loss curve meaningless. The epoch total
    is now accumulated separately and returned as a per-batch mean.
    """
    running_loss = 0.0  # loss accumulated since the last report (reset every 300 batches)
    epoch_loss = 0.0    # loss accumulated over the whole epoch
    num_batches = 0
    for batch_idx, data in enumerate(train_loader, 0):  # iterate over training batches
        inputs, target = data
        optimizer.zero_grad()  # clear gradients from the previous step
        outputs = model(inputs)  # forward pass
        loss = criterion(outputs, target)  # compute loss
        loss.backward()  # back-propagate gradients
        optimizer.step()  # SGD parameter update

        batch_loss = loss.item()
        running_loss += batch_loss
        epoch_loss += batch_loss
        num_batches += 1
        if batch_idx % 300 == 299:  # report every 300 mini-batches
            print('[%d,%5d] loss: %.3f' %
                  (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0      # (epoch, batch index, mean loss over window)

    # Guard against an empty loader to avoid ZeroDivisionError.
    return epoch_loss / num_batches if num_batches else 0.0


#
# 6. Evaluation function
#

def test():
    """Evaluate the model on test_loader; print and return the accuracy in percent.

    Improvement: the original computed the accuracy, printed it, and discarded
    it — it is now also returned so callers can record/plot it (backward
    compatible; existing callers ignore the return value). The printed output
    is unchanged.
    """
    total = 0
    correct = 0
    with torch.no_grad():  # inference only — no need to record operations for backprop
        for data in test_loader:  # iterate over test batches
            images, labels = data
            outputs = model(images)  # forward pass
            _, pred = torch.max(outputs.data, dim=1)  # predicted class = argmax over logits
            total += labels.size(0)  # count samples seen
            # Count predictions that match the ground-truth labels.
            correct += (pred == labels).sum().item()
    accuracy = 100 * correct / total
    print('%d %%' % accuracy)  # print accuracy (integer-truncated, as before)
    return accuracy


if __name__ == '__main__':
    lossy = []   # per-epoch loss values (y-axis)
    epochx = []  # epoch indices (x-axis)
    # path = "C:/Users/yas/Desktop/pytorch/MNIST/model/model1.pth"
    for epoch in range(EPOCHS):     # training epochs
        epochx.append(epoch)        # record the epoch number
        lossy.append(train(epoch))  # train one epoch and record the returned loss
        test()                      # measure test-set accuracy after each epoch

    # torch.save(model, path)
    # model = torch.load(path)

    # Visualize the loss over the training run.
    plt.plot(epochx, lossy)
    plt.grid()
    plt.show()
