import torch
from torch import nn
# 从torchvision导入数据集和数据预处理工具
from torchvision import datasets, transforms
# 从torch.utils.data导入DataLoader：用于批量加载数据
from torch.utils.data import DataLoader
# 导入PyTorch的函数式API：提供激活函数等无状态操作
import torch.nn.functional as F
# 导入PyTorch优化器库：提供SGD等优化算法
import torch.optim as optim
# 导入matplotlib绘图库：用于绘制训练损失和准确率曲线
import matplotlib.pyplot as plt

# ---- Hyperparameters ----
batch_size = 64
learning_rate = 0.01
momentum = 0.5
epochs = 10

# Configure matplotlib so CJK glyphs in titles/labels render correctly,
# and keep the minus sign displayable with these fonts.
plt.rcParams.update({
    "font.family": ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"],
    "axes.unicode_minus": False,
})

# Preprocessing pipeline: PIL image -> float tensor in [0, 1],
# then standardize with the published MNIST channel mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# MNIST training split (downloaded to ../dataset/mnist on first run),
# shuffled each epoch for SGD.
train_dataset = datasets.MNIST(root='../dataset/mnist', train=True,
                               transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# MNIST test split; evaluation order is fixed, so no shuffling.
test_dataset = datasets.MNIST(root='../dataset/mnist', train=False,
                              transform=transform, download=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


class ResidualBlock(nn.Module):
    """Two 3x3 same-padding convolutions with an identity skip connection.

    The channel count is preserved, so the input can be added to the
    convolution output directly without a projection.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        # F(x) = conv2(relu(conv1(x))); output is relu(x + F(x)).
        residual = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + residual)


class Net(nn.Module):
    """Small residual CNN classifier for 28x28 single-channel MNIST digits.

    Layout: 5x5 conv -> pool -> residual block -> 5x5 conv -> pool ->
    residual block -> flatten -> linear layer producing 10 class logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)   # 1x28x28 -> 16x24x24
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)  # 16x12x12 -> 32x8x8
        self.mp = nn.MaxPool2d(2)                      # halves spatial dims

        self.rblock1 = ResidualBlock(16)  # keeps 16 channels
        self.rblock2 = ResidualBlock(32)  # keeps 32 channels

        # Final feature map is 32 channels x 4 x 4 = 512 values.
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        batch = x.size(0)

        # Stage 1: conv -> relu -> pool -> residual; -> [B, 16, 12, 12]
        x = self.rblock1(self.mp(F.relu(self.conv1(x))))
        # Stage 2: conv -> relu -> pool -> residual; -> [B, 32, 4, 4]
        x = self.rblock2(self.mp(F.relu(self.conv2(x))))

        # Flatten to [B, 512] and project to class logits.
        return self.fc(x.view(batch, -1))


# Pick the best available accelerator: CUDA first, then Apple MPS, else CPU.
if torch.cuda.is_available():
    _accel = "cuda"
elif torch.backends.mps.is_available():
    _accel = "mps"
else:
    _accel = "cpu"
device = torch.device(_accel)
print(f"使用计算设备: {device}")
model = Net().to(device)  # move the model's parameters onto the chosen device

# Loss and optimizer: cross-entropy over logits, SGD with momentum.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)


def train(epoch):
    """Run one training epoch over `train_loader` and return the mean loss.

    Args:
        epoch: zero-based epoch index, used only in the progress printout.

    Returns:
        float: total epoch loss divided by the number of batches.
    """
    model.train()  # enable training-mode behavior (dropout/batchnorm etc.)
    running_loss = 0.0  # loss accumulated since the last progress print
    epoch_loss = 0.0    # loss accumulated over the entire epoch
    total_batches = len(train_loader)

    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        # Move the batch to the same device as the model.
        inputs, target = inputs.to(device), target.to(device)

        optimizer.zero_grad()  # clear gradients from the previous step

        # Forward pass, loss, backward pass, parameter update.
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        batch_loss = loss.item()
        running_loss += batch_loss
        epoch_loss += batch_loss

        # Print the mean loss of the last 300 batches.
        if batch_idx % 300 == 299:
            print(f'[{epoch + 1}, {batch_idx + 1:5d}] loss: {running_loss / 300:.3f}')
            running_loss = 0.0  # reset the window accumulator

    # BUG FIX: the previous code estimated the epoch total as
    # `running_loss + 300 * (total_batches // 300)`, which assumes every
    # printed 300-batch window summed to exactly 300.0 (i.e. loss 1.0/batch).
    # Accumulate the true total in `epoch_loss` and average it instead.
    return epoch_loss / total_batches


def test():
    """Evaluate the model on `test_loader`; return accuracy as a fraction."""
    model.eval()  # inference-mode behavior (dropout/batchnorm etc.)
    correct = 0
    total = 0

    # No gradients are needed for evaluation.
    with torch.no_grad():
        for images, labels in test_loader:
            # Keep the batch on the same device as the model.
            images, labels = images.to(device), labels.to(device)

            logits = model(images)
            predicted = logits.argmax(dim=1)  # class with the highest logit

            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    print(f'测试集准确率: {accuracy:.2f}%')
    return accuracy / 100  # fraction in [0, 1] for plotting


if __name__ == '__main__':
    # Per-epoch metric history for the final plot.
    epoch_list = []
    loss_list = []
    accuracy_list = []

    # Train for `epochs` rounds, evaluating on the test set after each one.
    for epoch in range(epochs):
        avg_loss = train(epoch)
        accuracy = test()

        epoch_list.append(epoch + 1)
        loss_list.append(avg_loss)
        accuracy_list.append(accuracy)

    # One figure with two y-axes: loss (left, blue) and accuracy (right, red).
    fig, ax_loss = plt.subplots(figsize=(10, 6))

    ax_loss.plot(epoch_list, loss_list, 'b-', linewidth=2, label='训练损失')
    ax_loss.set_xlabel('训练轮次')
    ax_loss.set_ylabel('训练损失', color='b')
    ax_loss.tick_params(axis='y', labelcolor='b')

    # Accuracy shares the x-axis; zoom its range to 95%-100% (fractions).
    ax_acc = ax_loss.twinx()
    ax_acc.plot(epoch_list, accuracy_list, 'r-', linewidth=2, label='测试准确率')
    ax_acc.set_ylabel('测试准确率', color='r')
    ax_acc.tick_params(axis='y', labelcolor='r')
    ax_acc.set_ylim(0.95, 1.0)

    # Merge both axes' legend entries into a single legend.
    handles_l, labels_l = ax_loss.get_legend_handles_labels()
    handles_r, labels_r = ax_acc.get_legend_handles_labels()
    ax_loss.legend(handles_l + handles_r, labels_l + labels_r, loc='best')
    plt.title('残差网络MNIST分类训练损失与测试准确率')

    plt.tight_layout()
    plt.show()
