import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt


# Data loading (unchanged from the baseline version)
def load_data_mnist(batch_size):
    """Return (train_loader, test_loader) for MNIST.

    Images are converted to tensors and normalized with the standard
    MNIST mean/std. The dataset is downloaded into ./data on first use.
    """
    preprocess = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    train_set = datasets.MNIST('./data', train=True, download=True, transform=preprocess)
    test_set = datasets.MNIST('./data', train=False, download=True, transform=preprocess)
    train_loader = DataLoader(train_set, batch_size, shuffle=True, num_workers=2)
    test_loader = DataLoader(test_set, batch_size, shuffle=False, num_workers=2)
    return train_loader, test_loader


# Optimized deep MLP with a residual connection
class DeepResidualMLP(nn.Module):
    """Three-layer MLP (784 -> 512 -> 256 -> 128) with BatchNorm/ReLU,
    plus a linear skip connection projecting the flattened input directly
    to 128 dims, followed by a 10-way classifier head.
    """

    def __init__(self):
        super().__init__()
        # Main path: each linear layer is followed by BN (stabilizes
        # training) and ReLU; dropout only after the first block.
        hidden_layers = [
            nn.Linear(784, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
        ]
        self.main = nn.Sequential(*hidden_layers)

        # Skip path: project the raw input to the same 128-dim space.
        # BN here too, to keep the two branches numerically comparable.
        self.residual = nn.Sequential(nn.Linear(784, 128), nn.BatchNorm1d(128))

        # Classifier head.
        self.output = nn.Linear(128, 10)

        # Kaiming-normal init for every linear layer, biases zeroed.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                nn.init.zeros_(module.bias)

    def forward(self, x):
        flat = torch.flatten(x, 1)
        # Residual sum of the two branches (no scaling), then classify.
        return self.output(self.main(flat) + self.residual(flat))


# Training loop (SGD optimizer with tuned hyperparameters)
def train_model(net, train_iter, test_iter, num_epochs=10, lr=0.05):
    """Train `net` with SGD + momentum, evaluating on the test set each epoch.

    Args:
        net: the model to train (trained in place).
        train_iter, test_iter: DataLoaders yielding (X, y) batches.
        num_epochs: number of passes over the training set.
        lr: SGD learning rate.

    Returns:
        (train_loss_list, train_acc_list, test_acc_list) — per-epoch history.
        (Previously the history was collected only for plotting and then
        discarded; returning it lets callers inspect or compare runs.)

    Side effects: prints per-epoch metrics and shows a matplotlib figure
    with the loss and accuracy curves.
    """
    optimizer = torch.optim.SGD(
        net.parameters(),
        lr=lr,
        momentum=0.9,       # momentum to speed up convergence
        weight_decay=1e-4,  # mild L2 regularization
    )
    criterion = nn.CrossEntropyLoss()

    train_loss_list, train_acc_list, test_acc_list = [], [], []

    for epoch in range(num_epochs):
        net.train()
        total_loss, total_correct = 0, 0

        for X, y in train_iter:
            optimizer.zero_grad()
            output = net(X)
            loss = criterion(output, y)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            total_correct += (output.argmax(1) == y).sum().item()

        # Mean loss per batch; accuracy over the whole training set.
        train_loss = total_loss / len(train_iter)
        train_acc = total_correct / len(train_iter.dataset)

        # Test-set evaluation: eval mode disables dropout and uses the
        # BatchNorm running statistics; no_grad avoids building graphs.
        net.eval()
        test_correct = 0
        with torch.no_grad():
            for X, y in test_iter:
                test_correct += (net(X).argmax(1) == y).sum().item()
        test_acc = test_correct / len(test_iter.dataset)

        # Record and report this epoch.
        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print(f'Epoch {epoch + 1}: Train Loss: {train_loss:.4f}, Acc: {train_acc:.3f} | Test Acc: {test_acc:.3f}')

    # Plot the training curves.
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(train_loss_list, label='Train Loss')
    plt.xlabel('Epoch')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(train_acc_list, label='Train Acc')
    plt.plot(test_acc_list, label='Test Acc')
    plt.xlabel('Epoch')
    plt.legend()

    plt.tight_layout()
    plt.show()

    return train_loss_list, train_acc_list, test_acc_list


# Main entry point
if __name__ == '__main__':
    # Fix the RNG seed so runs are reproducible.
    torch.manual_seed(42)

    batch_size = 256
    num_epochs = 10
    lr = 0.1  # SGD needs a larger learning rate than adaptive optimizers

    train_iter, test_iter = load_data_mnist(batch_size)
    model = DeepResidualMLP()  # the optimized residual MLP
    train_model(
        model,
        train_iter,
        test_iter,
        num_epochs,
        lr,
    )