# 1. Load required libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import  matplotlib.pyplot as plt
# 2. Hyperparameters (fixed by hand, unlike parameters, which the model learns).
BATCH_SIZE = 128  # samples processed per batch
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
EPOCHS = 100  # number of full passes over the training set

# 3. Input transform: convert the PIL image to a tensor, then normalize with
# the standard MNIST per-channel mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# 4. Download/load the MNIST train and test splits and wrap them in loaders.
from torch.utils.data import DataLoader

# Single source of truth for the dataset location (was duplicated in both calls).
DATA_ROOT = "A:/Pycharm_Project/cnn_mnist/data/mnist/MNIST/raw"
train_data = datasets.MNIST(root=DATA_ROOT,
                            train=True,
                            transform=transform,
                            download=True)
test_data = datasets.MNIST(root=DATA_ROOT,
                           train=False,
                           transform=transform,
                           download=True)
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
# NOTE(review): shuffling the test set is unusual but harmless for aggregate
# metrics; kept to preserve behavior (it affects which samples get displayed).
test_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True)


# 5. Network definition
class Digit(nn.Module):
    """Small CNN for single-channel 28x28 MNIST digits.

    Layout: conv(1->10, 5x5) -> ReLU -> maxpool 2x2
            -> conv(10->20, 3x3) -> ReLU -> flatten
            -> fc(2000->500) -> ReLU -> fc(500->10) -> log_softmax.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)   # 28x28 -> 24x24, 10 feature maps
        self.conv2 = nn.Conv2d(10, 20, 3)  # 12x12 -> 10x10, 20 feature maps
        self.fc1 = nn.Linear(20 * 10 * 10, 500)  # flattened 2000 features -> 500
        self.fc2 = nn.Linear(500, 10)            # 500 -> 10 class scores

    def forward(self, x):
        """Map a (batch, 1, 28, 28) image tensor to (batch, 10) log-probabilities."""
        batch = x.size(0)
        feat = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)  # -> (batch, 10, 12, 12)
        feat = F.relu(self.conv2(feat))                   # -> (batch, 20, 10, 10)
        flat = feat.view(batch, -1)                       # -> (batch, 2000)
        hidden = F.relu(self.fc1(flat))                   # -> (batch, 500)
        scores = self.fc2(hidden)                         # -> (batch, 10)
        return F.log_softmax(scores, dim=1)               # per-class log-probabilities
# 6. Instantiate the network on the chosen device and set up the optimizer.
model = Digit().to(DEVICE)  # create the model and move its parameters to DEVICE
optimizer = optim.Adam(model.parameters())  # Adam with library-default hyperparameters
# Helper that persists the model's learned parameters to a local file.
def save_model(model, model_name):
    """Write model.state_dict() to `model_name` and log the destination."""
    state = model.state_dict()  # parameters only, not the full module
    torch.save(state, model_name)
    print("Save model to {}".format(model_name))

import os
# 7. Training loop for a single epoch
def train_model(model, device, train_loader, optimizer, epoch):
    """Run one training epoch and checkpoint the weights to model.pth.

    Args:
        model: network whose forward() returns log-probabilities (log_softmax).
        device: torch.device to train on.
        train_loader: DataLoader yielding (image, label) batches.
        optimizer: optimizer updating model.parameters().
        epoch: current epoch number (used only for logging).
    """
    model.train()  # enable training-mode behavior (dropout/batchnorm)
    # Resume from the last checkpoint if one exists. map_location keeps this
    # working when the checkpoint was saved on a different device
    # (e.g. trained on GPU, resumed on CPU).
    if os.path.exists("model.pth"):
        model.load_state_dict(torch.load("model.pth", map_location=device))
    for batch_index, (data, label) in enumerate(train_loader):
        # Move the batch to the training device.
        data, label = data.to(device), label.to(device)
        # Reset accumulated gradients from the previous step.
        optimizer.zero_grad()
        output = model(data)
        # The model already applies log_softmax, so the matching loss is
        # nll_loss; F.cross_entropy would apply log_softmax a second time.
        loss = F.nll_loss(output, label)
        # Backpropagation and parameter update.
        loss.backward()
        optimizer.step()
        if batch_index % 3000 == 0:
            # NOTE(review): with BATCH_SIZE=128, MNIST has ~469 batches per
            # epoch, so this only fires at batch 0 — effectively once/epoch.
            print("Train Epoch : {} \t Loss : {:.6f}".format(epoch, loss.item()))
    # Persist the updated weights after every epoch.
    save_model(model, "model.pth")

# 8. Evaluation loop
def test_model(model, device, test_loader, num_images=10):
    """Evaluate the model on the test set and display sample predictions.

    Prints the average loss and accuracy over the whole test set, then
    renders the first `num_images` test samples with predicted vs. true
    labels via display_images().

    Args:
        model: network whose forward() returns log-probabilities.
        device: torch.device to evaluate on.
        test_loader: DataLoader yielding (image, label) batches.
        num_images: how many sample predictions to visualize.
    """
    model.eval()  # inference-mode behavior for dropout/batchnorm
    correct = 0.0
    test_loss = 0.0
    all_preds = []
    all_labels = []
    all_images = []
    with torch.no_grad():  # no gradients needed for evaluation
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            output = model(data)
            # The model already applies log_softmax, so the matching loss is
            # nll_loss; F.cross_entropy would apply log_softmax a second time.
            test_loss += F.nll_loss(output, label, reduction='sum').item()
            pred = output.argmax(dim=1)
            correct += pred.eq(label.view_as(pred)).sum().item()
            # Keep only as many samples as will actually be displayed,
            # instead of buffering the entire test set in memory.
            if len(all_images) < num_images:
                keep = num_images - len(all_images)
                all_preds.extend(pred[:keep].cpu().numpy())
                all_labels.extend(label[:keep].cpu().numpy())
                all_images.extend(data[:keep].cpu())
    test_loss /= len(test_loader.dataset)
    accuracy = 100.0 * correct / len(test_loader.dataset)
    print("Test —— Average loss : {:.4f}, Accuracy : {:.3f}%".format(test_loss, accuracy))
    # Show a few test images alongside their predictions.
    display_images(all_images, all_preds, all_labels, num_images)
def display_images(images, preds, labels, num_images):
    """Plot up to `num_images` digits with predicted vs. true labels.

    Saves the figure to test_images.png and shows it on screen.

    Args:
        images: sequence of (1, 28, 28) image tensors.
        preds: predicted class labels, aligned with `images`.
        labels: true class labels, aligned with `images`.
        num_images: number of panels to draw.
    """
    # Don't ask for more panels than we have samples (would IndexError).
    num_images = min(num_images, len(images))
    if num_images == 0:
        return
    # squeeze=False keeps `axes` 2-D even when num_images == 1; a bare call
    # would return a single Axes object there and break the indexing below.
    fig, axes = plt.subplots(1, num_images, figsize=(15, 15), squeeze=False)
    for i in range(num_images):
        ax = axes[0][i]
        image = images[i].squeeze()  # drop the channel dimension -> (28, 28)
        ax.imshow(image, cmap='gray')
        # Small font so adjacent titles don't overlap.
        ax.set_title(f"Pred: {preds[i]}, True: {labels[i]}", fontsize=10)
        ax.axis('off')
    # Save the figure to a file, then show it.
    plt.savefig("test_images.png")
    plt.show()


if __name__ == '__main__':
    print("按y键开始训练，按下其他开始测试")
    if input() == 'y':
        # Train for the configured number of epochs; each epoch resumes from
        # and re-saves the model.pth checkpoint.
        for epoch in range(1, EPOCHS + 1):
            train_model(model, DEVICE, train_loader, optimizer, epoch)
    else:
        # Load the trained weights before evaluating; otherwise the test
        # would run on a freshly initialized (untrained) network.
        if os.path.exists("model.pth"):
            model.load_state_dict(torch.load("model.pth", map_location=DEVICE))
        test_model(model, DEVICE, test_loader)
