#
# minst_t02.py
#
# [MNIST数据集的训练及简单应用](https://blog.csdn.net/m0_53396562/article/details/119387564?spm=1001.2101.3001.6650.1&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-119387564-blog-112063808.pc_relevant_recovery_v2&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-119387564-blog-112063808.pc_relevant_recovery_v2&utm_relevant_index=2)
#
###############################################################################
#
import torch
import torch.nn as nn
import mnist
from torch import Tensor
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

MNIST_ROOT = "e:/sfxData/DeepLearning"  # root folder holding the MNIST data files

# 1. Hyper-parameters
LR = 0.001          # learning rate for the Adam optimizer
EPOCHS = 20         # total number of training epochs
WORKERS = 0         # DataLoader worker processes (0 = load in the main process)
BATCH_SIZE = 64     # mini-batch size
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#
# Training data
#
train_dataset = mnist.get_dataset(MNIST_ROOT, train=True)
# print(len(train_dataset))    # 60000
# train_dataset_iter = iter(train_dataset)
# X, y = next(train_dataset_iter)
# print(X.shape)  # [1, 28, 28]

# Mini-batch loader (shuffle=True randomizes sample order each epoch)
train_loader = DataLoader(
    dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=WORKERS)

# Iterator usage example:
# dataiter = iter(train_loader)
# images, labels = next(dataiter)
# print(Tensor(images).shape)  # [64, 1, 28, 28]

#
# Test data
#
test_dataset = mnist.get_dataset(MNIST_ROOT, train=False)
# print(len(test_dataset))  # 10000
# test_dataset_iter = iter(test_dataset)
# X, y = next(test_dataset_iter)
# print(X.shape)  # [1, 28, 28]
test_loader = DataLoader(
    dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=WORKERS)


class LNet(nn.Module):
    """Single-layer linear classifier for 28x28 grayscale digit images.

    Each image is flattened to a 784-dimensional vector and projected
    to 10 raw class logits (one per digit).
    """

    def __init__(self):
        super().__init__()
        # One fully-connected layer: 1*28*28 input pixels -> 10 class scores.
        self.fc = nn.Linear(1 * 28 * 28, 10)
        # Kept for parity with the original model; not used in forward().
        self.relu = nn.ReLU()

    def forward(self, image):
        # Flatten to (batch, 784), then project to (batch, 10) logits.
        flattened = image.reshape(-1, 1 * 28 * 28)
        logits = self.fc(flattened)
        return logits


# Build the network and move it to the selected device (GPU if available).
model = LNet()
model = model.to(DEVICE)

# Loss function: cross-entropy over the 10 digit classes
# (applies log-softmax internally, so the model outputs raw logits).
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(DEVICE)

# Optimizer: Adam over all model parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=LR)

# NOTE(review): if input tensors stay on the CPU while the model is on the
# GPU, PyTorch raises:
#   Expected all tensors to be on the same device,
#   but found at least two devices, cuda:0 and cpu!
#   (when checking argument for argument mat1 in method wrapper_addmm)
# To see where a tensor lives: print(data.device)

# Training


def my_test0():
    """Smoke test: run a single optimization step on one mini-batch."""
    for step, (images, labels) in enumerate(train_loader, 0):
        print(step)             # 0
        print(images.shape)     # [64, 1, 28, 28]
        print(labels.shape)     # [64]

        # BUG FIX: tensor.to() is not in-place — it returns a new tensor.
        # The original discarded the result (and needlessly re-wrapped the
        # batch in torch.Tensor), so the data stayed on the CPU while the
        # model may live on the GPU.
        images = images.to(DEVICE)
        labels = labels.to(DEVICE)

        optimizer.zero_grad()               # clear accumulated gradients
        outputs = model(images)             # forward pass -> [64, 10] logits
        loss = criterion(outputs, labels)   # cross-entropy loss
        loss.backward()                     # back-propagate gradients
        optimizer.step()                    # parameter update
        break                               # one batch is enough for a smoke test


my_test0()


def train(epoch):
    """Run one training epoch over train_loader.

    Args:
        epoch: zero-based epoch index, used only for progress printing.

    Returns:
        The running loss accumulated since the last 300-step report.
        NOTE: this is a partial sum (it is reset every 300 steps), not
        the full-epoch loss — preserved from the original behavior.
    """
    running_loss = 0.0  # re-accumulated each epoch
    for step, (images, labels) in enumerate(train_loader):
        # BUG FIX: move the batch to the model's device; the original left
        # it on the CPU, which crashes when the model runs on CUDA.
        images = images.to(DEVICE)
        labels = labels.to(DEVICE)

        optimizer.zero_grad()               # clear accumulated gradients
        outputs = model(images)             # forward pass -> [batch, 10] logits
        loss = criterion(outputs, labels)   # cross-entropy loss
        loss.backward()                     # back-propagate gradients
        optimizer.step()                    # parameter update

        # Accumulate the loss for periodic reporting.
        running_loss += loss.item()

        # Report the average loss every 300 mini-batches.
        if step % 300 == 299:
            print('[%d,%5d] loss: %.3f' %
                  (epoch + 1, step + 1, running_loss / 300))
            running_loss = 0.0
    return running_loss


def test():
    """Evaluate classification accuracy on test_loader and print it."""
    total = 0
    correct = 0
    with torch.no_grad():  # evaluation only — no gradients needed
        for (images, labels) in test_loader:
            # BUG FIX: move the batch to the model's device; the original
            # left it on the CPU, which crashes when the model is on CUDA.
            images = images.to(DEVICE)
            labels = labels.to(DEVICE)
            outputs = model(images)
            # argmax over the class dimension -> predicted digit per sample
            _, pred = torch.max(outputs.data, dim=1)
            total += labels.size(0)  # count samples seen
            # Count samples whose prediction matches the label.
            correct += (pred == labels).sum().item()
        print('%d %%' % (100 * correct / total))  # accuracy as integer percent
    return


def go(epochs=5):
    """Train for *epochs* epochs, test after each one, then plot the loss.

    Args:
        epochs: number of training epochs. Defaults to 5, matching the
            original hard-coded loop (EPOCHS=20 was commented out).
    """
    lossy = []   # y-axis: (partial) running loss returned by train()
    epochx = []  # x-axis: epoch index
    for epoch in range(epochs):
        epochx.append(epoch)
        lossy.append(train(epoch))  # train one epoch, record its loss
        test()                      # report accuracy after every epoch

    # Optional checkpointing:
    # torch.save(model, "e:/temp/model_mnist.pth")
    # model = torch.load("C:/Users/yas/Desktop/pytorch/MNIST/model/model1.pth")

    # Visualize the training loss over epochs.
    plt.plot(epochx, lossy)
    plt.grid()
    plt.show()


go()

#
#
if __name__ == '__main__':
    # Training already runs at import time via the top-level go() call above;
    # this guard is left as a placeholder entry point.
    # go()
    pass
