'''
Handwritten digit recognition (MNIST) using the PyTorch framework.
Run with:         python pytorch.py
View TensorBoard: tensorboard --logdir=./logs
'''
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
from torchsummary import summary
from torch.utils.tensorboard import SummaryWriter

# Hyperparameters
BATCH_SIZE = 128  # mini-batch size for training
NUM_EPOCHS = 10   # number of training epochs
# Fix: pick the GPU only when one is actually available. The original hard-coded
# 'cuda', which raises a RuntimeError on CPU-only machines.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# TensorBoard log writer (view with: tensorboard --logdir=./logs)
writer = SummaryWriter('logs')

# Normalization preprocessing: subtract mean, divide by std -> pixel range becomes [-1, 1]
normalize = transforms.Normalize(mean=[0.5], std=[0.5])
transform = transforms.Compose([transforms.ToTensor(), normalize])  # to-tensor conversion + normalization

# Datasets: MNIST train/test splits with the transform above applied on load
train_dataset = torchvision.datasets.MNIST(root='./mnist/', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(root='./mnist/', train=False, transform=transform, download=False)

# Data loaders for batched iteration.
# Training: shuffled, and the last incomplete batch is dropped.
train_loader = data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
# Testing: batch size 1 (convenient for per-sample visualization), no shuffling.
test_loader = data.DataLoader(test_dataset, batch_size=1, shuffle=False, drop_last=True)

# Convolutional classifier for 28x28 single-channel MNIST digits.
class SimpleNet(nn.Module):
    """Three Conv2d+ReLU+MaxPool blocks followed by a two-layer fully-connected head.

    Input:  (N, 1, 28, 28) image batch.
    Output: (N, 10) raw class logits (one per digit).
    """

    def __init__(self):
        super().__init__()
        # Block 1: (N,1,28,28) -> conv -> (N,32,28,28) -> pool -> (N,32,14,14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),  # padding keeps the spatial size
            nn.ReLU(),
            nn.MaxPool2d(2),  # 2x2 window halves height and width
        )
        # Block 2: (N,32,14,14) -> (N,64,14,14) -> (N,64,7,7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Block 3: (N,64,7,7) -> (N,64,7,7) -> (N,64,3,3); pooling drops the odd last row/col
        self.conv3 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Classifier head: flattened 64*3*3 = 576 features -> 128 -> 10 logits
        self.dense = nn.Sequential(
            nn.Linear(64 * 3 * 3, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )

    def forward(self, x):
        """Run the conv stack, flatten, and classify; returns (N, 10) logits."""
        features = self.conv3(self.conv2(self.conv1(x)))
        flat = features.view(features.size(0), -1)  # (N,64,3,3) -> (N,576); -1 infers the dim
        return self.dense(flat)

# Instantiate the network and move it to the selected device.
model = SimpleNet().to(device)

# Report the total number of model parameters.
param_count = sum(p.numel() for p in model.parameters())
print("Number of parameters: ", param_count)

# Print a layer-by-layer summary via torchsummary; input is one 1-channel 28x28 image.
summary(model, input_size = (1, 28, 28), device = device)

# Loss: cross-entropy over the 10 digit classes (expects raw logits).
criterion = torch.nn.CrossEntropyLoss()
# Optimizer: Adam with its default hyperparameters.
optimizer = torch.optim.Adam(model.parameters())

# Training and evaluation loop
for epoch in range(NUM_EPOCHS):
    # ---- Training ----
    model.train()
    train_loss = 0
    train_acc = 0
    for images, labels in tqdm(train_loader):  # progress bar over the training batches
        images, labels = images.to(device), labels.to(device)  # move batch to the compute device
        out = model(images)  # forward pass: (B,1,28,28) -> (B,10) logits
        loss = criterion(out, labels)  # mean cross-entropy over this batch
        train_loss += loss.item()  # accumulate per-batch mean losses
        # torch.max over dim 1 returns (max values, argmax indices); [1] takes the
        # predicted digit per sample: (B,10) -> (B,)
        predict = torch.max(out, 1)[1]
        train_correct = (predict == labels).sum()  # count of correct predictions in this batch
        train_acc += train_correct.item()
        optimizer.zero_grad()  # clear gradients from the previous step
        loss.backward()        # backpropagate
        optimizer.step()       # apply the parameter update
    # NOTE(review): drop_last=True discards the final partial batch, so dividing by
    # len(train_dataset) slightly understates the true per-sample averages — confirm
    # whether that is intended before comparing runs.
    print('epoch:{}, Train loss:{:.6f}, accuracy:{:.6f}'.format(epoch, train_loss/(len(train_dataset)), train_acc/(len(train_dataset))))

    # ---- Evaluation ----
    model.eval()
    test_loss = 0
    test_acc = 0
    # Fix: run evaluation under torch.no_grad(). The original tracked gradients
    # through every test forward pass, wasting memory and compute for no benefit.
    with torch.no_grad():
        for images, labels in tqdm(test_loader):
            images, labels = images.to(device), labels.to(device)
            out = model(images)
            loss = criterion(out, labels)
            test_loss += loss.item()
            predict = torch.max(out, 1)[1]
            test_correct = (predict == labels).sum()
            test_acc += test_correct.item()

            # # During the last epoch, optionally display each image and its prediction
            # if epoch == NUM_EPOCHS - 1:
            #     image = np.array(images[0,0,:].cpu())
            #     print(f'label : {labels.item()}, predict : {predict.item()}')  # print label vs prediction
            #     cv2.imshow('image', image)  # show the image in a window
            #     # cv2.imwrite('image.jpg', image)  # save the image to the current directory
            #     cv2.waitKey(0)
            #     cv2.destroyAllWindows()
    print('epoch:{}, Test loss:{:.6f}, accuracy:{:.6f}'.format(epoch, test_loss/(len(test_dataset)), test_acc/(len(test_dataset))))

    # Log train/eval accuracy curves to TensorBoard under one chart
    writer.add_scalars(
        main_tag="accuracy",
        tag_scalar_dict={
            "train": train_acc/(len(train_dataset)),
            "evaluate": test_acc/(len(test_dataset))
        },
        global_step = epoch
    )

writer.close()
