﻿import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.version
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np

# Sanity check: is CUDA visible to this process?
# NOTE(review): the model below is never moved to a GPU, so this is informational only.
print(torch.cuda.is_available())

current_working_directory = os.getcwd()
# Root folder where torchvision expects (or will place) the MNIST files.
data_file_path = os.path.join(current_working_directory, "Dataset", "Minist", "File")

input_size = 28  # Image size: 28x28 pixels (unused below; kept for reference)
numclasses = 10  # Number of label classes (unused below; kept for reference)
num_epochs = 3  # Total number of training epochs
batch_size = 64  # Number of images per mini-batch

# Training set.
# download=True is a no-op when the files already exist, so this also works
# on a fresh checkout instead of crashing with a "dataset not found" error.
train_dataset = datasets.MNIST(
    root=data_file_path,
    train=True,
    transform=transforms.ToTensor(),
    download=True,
)

# Test set
test_dataset = datasets.MNIST(
    root=data_file_path, train=False, transform=transforms.ToTensor(), download=True
)

# Wrap both datasets in batched loaders.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True
)
# Evaluation order does not affect accuracy, so the test set is not shuffled.
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False
)


class CNN(nn.Module):
    """Two-stage convolutional network for 28x28 grayscale MNIST digits.

    Pipeline: (1, 28, 28) -> conv1 -> (16, 14, 14) -> conv2 -> (32, 7, 7)
    -> flatten -> linear -> 10 class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Stage 1: a 5x5 conv with padding 2 keeps the 28x28 spatial size
        # (padding = (kernel_size - 1) / 2 when stride is 1), then a 2x2
        # max-pool halves it to 14x14 with 16 feature maps.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Stage 2: same pattern, 16 -> 32 channels, 14x14 -> 7x7.
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Classifier head: 32*7*7 flattened features -> 10 logits.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Map a (batch, 1, 28, 28) image batch to (batch, 10) logits."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # (batch, 32*7*7)
        return self.out(flat)


def accuracy(predictions, labels):
    """Return (correct_count, batch_size) for one batch.

    predictions: raw logits of shape (batch, num_classes).
    labels: ground-truth class indices of shape (batch,).
    The first element is a 0-dim tensor (use .item() for a Python int).
    """
    _, predicted = torch.max(predictions.data, dim=1)
    correct = predicted.eq(labels.data.view_as(predicted)).sum()
    return correct, len(labels)


# Instantiate the network (stays on CPU; never moved to a GPU in this script)
net = CNN()

# # Load previously saved parameters
# net.load_state_dict(torch.load('model_parameters.pth'))

# Loss: cross-entropy over the 10 class logits
criterion = nn.CrossEntropyLoss()

optimizer = torch.optim.Adam(
    net.parameters(), lr=0.001
)  # Adam optimizer (NOTE(review): the original comment said plain SGD, but this is Adam)

# Main training loop.
for epoch in range(num_epochs):
    # Per-batch (correct_count, batch_size) tuples accumulated over this epoch.
    train_rights = []
    for batch_idx, (data, target) in enumerate(train_loader):
        net.train()  # restore training mode (may have been switched to eval below)

        output = net(data)  # logits, shape (batch, 10)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_rights.append(accuracy(output, target))

        # Every 100 batches, evaluate on the full test set and report progress.
        if batch_idx % 100 == 0:
            net.eval()
            val_rights = []

            # no_grad: evaluation needs no autograd graphs — saves memory and
            # time without changing the reported numbers.
            with torch.no_grad():
                for data, target in test_loader:
                    output = net(data)
                    val_rights.append(accuracy(output, target))

            # Aggregate (correct, total) counts over train and validation.
            train_r = (
                sum(tup[0] for tup in train_rights),
                sum(tup[1] for tup in train_rights),
            )
            val_r = (
                sum(tup[0] for tup in val_rights),
                sum(tup[1] for tup in val_rights),
            )
            print(
                "当前epoch: {} [{}/{}({:.0f}%)]\t损失:{:.6f}\t训练集准确率:{:.2f}%\t测试集正确率:{:.2f}%".format(
                    epoch,
                    batch_idx * batch_size,
                    len(train_loader.dataset),
                    100.0 * batch_idx / len(train_loader),
                    loss.item(),  # .item() extracts the Python float cleanly
                    100.0 * train_r[0].item() / train_r[1],
                    100.0 * val_r[0].item() / val_r[1],
                )
            )

# Persist the trained weights. Create the target directory first so
# torch.save does not fail with FileNotFoundError on a fresh checkout.
mechine_file_path = os.path.join(
    current_working_directory,
    "Pytorch",
    "Network",
    "Cnn",
    "Machine",
    "model_parameters.pth",
)
os.makedirs(os.path.dirname(mechine_file_path), exist_ok=True)
torch.save(net.state_dict(), mechine_file_path)