

import torch
from torch import nn, device
from torch.optim import optimizer
# 从torchvision导入数据集、预训练模型和数据预处理工具
from torchvision import datasets, transforms
# 从torch.utils.data导入DataLoader：用于批量加载数据
from torch.utils.data import DataLoader
# 导入PyTorch的函数式API：提供激活函数等无状态操作
import torch.nn.functional as F
# 导入PyTorch优化器库：提供SGD等优化算法
import torch.optim as optim
# 导入matplotlib绘图库：用于绘制训练损失和准确率曲线
import matplotlib.pyplot as plt

# --- Hyperparameters ---
batch_size = 64       # mini-batch size shared by train and test loaders
learning_rate = 0.01  # SGD step size
momentum = 0.5        # SGD momentum factor
epochs = 10           # number of full passes over the training set

# Configure matplotlib fonts so CJK axis labels/titles render correctly
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
plt.rcParams["axes.unicode_minus"] = False  # keep minus signs renderable with these fonts

# Preprocessing pipeline applied to every MNIST image
transform = transforms.Compose([
    transforms.ToTensor(),  # PIL image -> float tensor scaled to [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # standardize with the MNIST mean/std
])

# Training split; downloaded to ../dataset/mnist on first run
train_dataset = datasets.MNIST(
    root='../dataset/mnist',
    train=True,
    transform=transform,
    download=True
)

# Shuffled each epoch so SGD sees batches in a fresh order
train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True
)

# Held-out evaluation split
test_dataset = datasets.MNIST(
    root='../dataset/mnist',
    train=False,
    transform=transform,
    download=True
)

# Fixed order: evaluation does not need shuffling
test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
    shuffle=False
)

class InceptionNet(nn.Module):
    """Inception-style block: four parallel convolution branches whose
    outputs are concatenated along the channel axis.

    Output channel count is fixed at 16 + 24 + 24 + 24 = 88 for any
    ``in_channels``; all branches preserve the spatial size via padding.
    """

    def __init__(self, in_channels):
        super(InceptionNet, self).__init__()
        # Branch A: plain 1x1 convolution
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)

        # Branch B: 1x1 bottleneck followed by a padded 5x5 convolution
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)

        # Branch C: 1x1 bottleneck followed by two padded 3x3 convolutions
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

        # Branch D: 3x3 average pooling followed by a 1x1 convolution
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        """Apply every branch to ``x`` and concatenate on dim=1 (channels)."""
        out_a = self.branch1x1(x)
        out_b = self.branch5x5_2(self.branch5x5_1(x))
        out_c = self.branch3x3_3(self.branch3x3_2(self.branch3x3_1(x)))
        # stride=1 + padding=1 keeps the pooled map the same spatial size
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        out_d = self.branch_pool(pooled)
        return torch.cat([out_a, out_b, out_c, out_d], dim=1)

class Net(nn.Module):
    """CNN for 28x28 single-channel digit images.

    Two conv+maxpool stages, each followed by an Inception block, then a
    single linear layer producing 10 class logits (no softmax — the loss
    applies it).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)  # 88 = 16 + 24*3 from InceptionNet

        self.incep1 = InceptionNet(in_channels=10)  # matches conv1 output channels
        self.incep2 = InceptionNet(in_channels=20)  # matches conv2 output channels

        self.mp = nn.MaxPool2d(2)
        # spatial trace: 28 -conv5-> 24 -pool-> 12 -conv5-> 8 -pool-> 4
        # flattened size: 88 channels * 4 * 4 = 1408
        self.fc = nn.Linear(1408, 10)

    def forward(self, x):
        batch = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        # flatten every sample and classify
        return self.fc(x.view(batch, -1))

# Pick the best available compute device: CUDA GPU > Apple MPS > CPU.
# NOTE(review): this rebinds the `device` name imported from torch at the top
# of the file — that import is effectively shadowed from here on.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
print(f"使用计算设备: {device}")
model = Net().to(device)

# Cross-entropy loss: expects the raw logits that Net.forward returns
criterion = nn.CrossEntropyLoss()
# SGD with momentum; NOTE(review): this assignment shadows the `optimizer`
# module imported from torch.optim at the top of the file.
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)

def train(epoch):
    """Run one training epoch and return the mean loss over all batches.

    Relies on the module-level ``model``, ``train_loader``, ``optimizer``,
    ``criterion``, ``device`` and ``epochs``. Prints progress every 100
    batches.
    """
    model.train()
    loss_sum = 0.0
    n_batches = len(train_loader)

    for step, (inputs, target) in enumerate(train_loader):
        inputs = inputs.to(device)
        target = target.to(device)

        optimizer.zero_grad()
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        loss_sum += loss.item()
        if step % 100 == 0:
            print(f'训练轮次: [{epoch + 1}/{epochs}], 批次: [{step}/{n_batches}], 损失: {loss.item():.6f}')

    return loss_sum / n_batches

def test():
    """Evaluate the model on ``test_loader`` and return accuracy in percent.

    Uses the module-level ``model``, ``test_loader`` and ``device``; runs
    under no_grad so no graph is built.
    """
    model.eval()
    hits = 0
    seen = 0

    with torch.no_grad():
        for inputs, target in test_loader:
            inputs, target = inputs.to(device), target.to(device)
            # predicted class = index of the largest logit per sample
            preds = model(inputs).max(dim=1).indices
            seen += target.size(0)
            hits += (preds == target).sum().item()

    accuracy = 100 * hits / seen
    print(f'测试准确率: {accuracy:.2f}% ({hits}/{seen})\n')
    return accuracy

if __name__ == '__main__':
    # Per-epoch history used for plotting after training finishes
    epoch_list = []
    loss_list = []
    accuracy_list = []

    # Training loop: train one epoch, then evaluate on the test split
    for epoch in range(epochs):
        # Mean training loss for this epoch
        avg_loss = train(epoch)
        # Test-set accuracy after this epoch
        accuracy = test()

        # Record history (1-based epoch index for the x-axis)
        epoch_list.append(epoch + 1)
        loss_list.append(avg_loss)
        accuracy_list.append(accuracy)

    # Plot training loss and test accuracy on a shared x-axis
    fig, ax1 = plt.subplots(figsize=(10, 6))

    # Left y-axis: training loss (blue)
    ax1.plot(epoch_list, loss_list, 'b-', linewidth=2, label='训练损失')
    ax1.set_xlabel('训练轮次')
    ax1.set_ylabel('训练损失', color='b')
    ax1.tick_params(axis='y', labelcolor='b')

    # Right y-axis: test accuracy (red), sharing the same x-axis
    ax2 = ax1.twinx()
    ax2.plot(epoch_list, accuracy_list, 'r-', linewidth=2, label='测试准确率')
    ax2.set_ylabel('测试准确率 (%)', color='r')
    ax2.tick_params(axis='y', labelcolor='r')
    ax2.set_ylim(90, 100)  # zoom accuracy axis to the 90-100% band

    # Merge the legends from both axes and add a title
    lines1, labels1 = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax1.legend(lines1 + lines2, labels1 + labels2, loc='best')
    plt.title('MNIST分类模型训练损失与测试准确率')

    # Render the figure
    plt.tight_layout()
    plt.show()