import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt


# 1. 加载MNIST数据集
def load_data_mnist(batch_size):
    """Download (if needed) and wrap the MNIST train/test splits in DataLoaders.

    Args:
        batch_size: number of samples per mini-batch.

    Returns:
        (train_loader, test_loader) tuple; the train loader shuffles each
        epoch, the test loader does not, and both use 2 worker processes.
    """
    # Normalize with the standard MNIST per-channel mean/std.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_set = datasets.MNIST('./data', train=True, download=True, transform=transform)
    test_set = datasets.MNIST('./data', train=False, download=True, transform=transform)
    print("数据集加载完成！")
    train_loader = DataLoader(train_set, batch_size, shuffle=True, num_workers=2)
    test_loader = DataLoader(test_set, batch_size, shuffle=False, num_workers=2)
    return train_loader, test_loader


# 2. 带Dropout的卷积神经网络模型
class ConvNet(nn.Module):
    """CNN for 28x28 single-channel MNIST digits, regularized with Dropout.

    Architecture:
        conv(1->10, 5x5) -> ReLU -> Dropout(0.5) -> maxpool(2x2)
        -> conv(10->20, 3x3) -> ReLU -> Dropout(0.5)
        -> fc(2000 -> 500) -> ReLU -> fc(500 -> 10 logits)
    """

    def __init__(self):
        super().__init__()
        # Feature extractor; spatial sizes: 28 -> conv5 -> 24 -> pool -> 12 -> conv3 -> 10.
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.dropout1 = nn.Dropout(0.5)  # element-wise dropout after the first activation
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.dropout2 = nn.Dropout(0.5)  # element-wise dropout after the second activation
        # Classifier head: 20 channels * 10 * 10 spatial = 2000 flat features.
        self.fc1 = nn.Linear(20 * 10 * 10, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        # First stage: conv -> ReLU -> dropout -> 2x2 max pool.
        x = self.dropout1(torch.relu(self.conv1(x)))
        x = torch.max_pool2d(x, 2, 2)

        # Second stage: conv -> ReLU -> dropout (no pooling here).
        x = self.dropout2(torch.relu(self.conv2(x)))

        # Flatten everything except the batch dimension, then classify.
        x = torch.flatten(x, 1)
        x = torch.relu(self.fc1(x))
        return self.fc2(x)


# 3. 训练和评估函数
def _evaluate_accuracy(net, data_iter):
    """Return classification accuracy of `net` over `data_iter` (gradient-free)."""
    net.eval()  # evaluation mode: disables Dropout for deterministic scoring
    correct, samples = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            output = net(X)
            correct += (output.argmax(dim=1) == y).sum().item()
            samples += y.size(0)
    return correct / samples


def _plot_metrics(train_loss_list, train_acc_list, test_acc_list):
    """Plot the training-loss curve and the train/test accuracy curves side by side."""
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(train_loss_list, label='Train Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(train_acc_list, label='Train Acc')
    plt.plot(test_acc_list, label='Test Acc')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.tight_layout()
    plt.show()


def train_model(net, train_iter, test_iter, num_epochs=10, lr=0.1):
    """Train `net` with SGD + cross-entropy, evaluate every epoch, and plot curves.

    Args:
        net: model producing (N, num_classes) logits from the loader's inputs.
        train_iter: DataLoader yielding (inputs, labels) batches for training.
        test_iter: DataLoader yielding (inputs, labels) batches for evaluation.
        num_epochs: number of full passes over `train_iter`.
        lr: SGD learning rate.

    Returns:
        (train_loss_list, train_acc_list, test_acc_list): per-epoch metrics.
        (Previously these were computed but discarded; returning them is
        backward-compatible with callers that ignore the return value.)
    """
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    train_loss_list = []
    train_acc_list = []
    test_acc_list = []

    print("开始训练...")
    for epoch in range(num_epochs):
        net.train()  # training mode: enables Dropout
        total_loss, total_correct, total_samples = 0, 0, 0

        # Training pass.
        for X, y in train_iter:
            output = net(X)
            loss = criterion(output, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            total_correct += (output.argmax(dim=1) == y).sum().item()
            total_samples += y.size(0)

        # Mean loss per batch; accuracy over all samples seen this epoch.
        train_loss = total_loss / len(train_iter)
        train_acc = total_correct / total_samples

        # Held-out evaluation (sets the model to eval mode internally).
        test_acc = _evaluate_accuracy(net, test_iter)

        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)

        print(f'Epoch {epoch + 1}/{num_epochs}: '
              f'Train Loss: {train_loss:.4f}, '
              f'Train Acc: {train_acc:.3f}, '
              f'Test Acc: {test_acc:.3f}')

    _plot_metrics(train_loss_list, train_acc_list, test_acc_list)
    return train_loss_list, train_acc_list, test_acc_list


if __name__ == '__main__':
    # Fix the RNG seed so weight initialization and shuffling are reproducible.
    torch.manual_seed(42)

    # Hyperparameters.
    batch_size, num_epochs, learning_rate = 256, 10, 0.1

    # Build the data pipeline, instantiate the model, and run training.
    train_iter, test_iter = load_data_mnist(batch_size)
    model = ConvNet()
    train_model(model, train_iter, test_iter, num_epochs, learning_rate)