import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts


# 1. Enhanced data preprocessing
def load_data_mnist(batch_size):
    """Build MNIST train/test DataLoaders with light train-time augmentation.

    Args:
        batch_size: mini-batch size for the training loader; the test loader
            uses ``batch_size * 2`` since evaluation stores no gradients.

    Returns:
        Tuple ``(train_loader, test_loader)``.
    """
    # BUG FIX: augment BEFORE ToTensor/Normalize. Previously RandomAffine ran
    # after Normalize, so its default fill value (0) no longer matched the
    # image background (which Normalize maps to a non-zero value), and the
    # affine warp operated on already-normalized data.
    train_transform = transforms.Compose([
        transforms.RandomAffine(degrees=5, translate=(0.1, 0.1)),  # light augmentation
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    # No augmentation at test time — deterministic evaluation.
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    train_data = datasets.MNIST('./data', train=True, download=True, transform=train_transform)
    test_data = datasets.MNIST('./data', train=False, download=True, transform=test_transform)

    return (
        DataLoader(train_data, batch_size, shuffle=True, num_workers=4, pin_memory=True),
        DataLoader(test_data, batch_size * 2, shuffle=False, num_workers=4)
    )


# 2. Improved model architecture
class EnhancedNN(nn.Module):
    """MLP classifier for 28x28 MNIST digits: 784 -> 512 -> 256 -> 10.

    Uses BatchNorm + LeakyReLU(0.1) + Dropout and Kaiming-normal
    initialization matched to the LeakyReLU slope. ``forward`` returns raw
    logits of shape (batch, 10).
    """

    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(784, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(0.1),
            nn.Dropout(0.2),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(0.1),
            nn.Linear(256, 10)
        )
        self._init_weights()

    def _init_weights(self):
        """Kaiming-normal init for all Linear weights; zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # BUG FIX: pass the actual negative slope a=0.1 used by the
                # LeakyReLU layers; the previous default (a=0) computed the
                # gain for a plain ReLU.
                nn.init.kaiming_normal_(m.weight, a=0.1, mode='fan_out',
                                        nonlinearity='leaky_relu')
                nn.init.zeros_(m.bias)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input images x."""
        return self.net(x)


# 3. Three-stage optimization training scheme
def train_enhanced(model, train_loader, test_loader, epochs=15):
    """Train ``model`` with a three-stage optimization schedule.

    Stage 1 (epochs 0-4):  SGD warmup with Nesterov momentum.
    Stage 2 (epochs 5-9):  AdamW (decoupled weight decay on weights only).
    Stage 3 (epochs 10+):  AdamW with cosine-annealing warm restarts.

    Args:
        model: classifier producing (batch, num_classes) logits.
        train_loader: DataLoader yielding (x, y) training batches.
        test_loader: DataLoader used for per-epoch evaluation.
        epochs: total epoch count; the stage boundaries assume >= 15.

    Returns:
        dict with per-epoch ``'loss'``, ``'train_acc'`` and ``'test_acc'``
        lists (also plotted at the end).
    """
    # Stage 1: SGD warmup — biases get a higher LR than weights.
    optimizer = optim.SGD([
        {'params': [p for n, p in model.named_parameters() if 'weight' in n], 'lr': 0.1},
        {'params': [p for n, p in model.named_parameters() if 'bias' in n], 'lr': 0.2}
    ], momentum=0.9, nesterov=True)

    # Stage 2: AdamW — no weight decay on biases.
    adam_optimizer = optim.AdamW([
        {'params': [p for n, p in model.named_parameters() if 'weight' in n], 'lr': 0.001, 'weight_decay': 0.01},
        {'params': [p for n, p in model.named_parameters() if 'bias' in n], 'lr': 0.002, 'weight_decay': 0}
    ])

    # Stage 3: cosine annealing with warm restarts; T_0 is counted in EPOCHS.
    scheduler = CosineAnnealingWarmRestarts(adam_optimizer, T_0=5, T_mult=1, eta_min=1e-5)

    criterion = nn.CrossEntropyLoss()
    history = {'loss': [], 'train_acc': [], 'test_acc': []}

    for epoch in range(epochs):
        # Optimizer hand-over between stages.
        if epoch == 5:
            optimizer = adam_optimizer
            print("\n切换到AdamW优化器")
        elif epoch == 10:
            print("\n启用Cosine学习率调度")

        model.train()
        total_loss = 0
        correct = 0

        for x, y in train_loader:
            optimizer.zero_grad()
            outputs = model(x)
            loss = criterion(outputs, y)
            loss.backward()

            # Gradient clipping keeps the high-LR warmup stage stable.
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            optimizer.step()

            total_loss += loss.item()
            correct += (outputs.argmax(1) == y).sum().item()

        # BUG FIX: step the scheduler once per EPOCH, not once per batch.
        # T_0=5 means a 5-epoch cosine cycle; the previous per-batch call
        # completed an entire warm-restart cycle every 5 mini-batches.
        if epoch >= 10:
            scheduler.step()

        # Per-epoch evaluation
        train_acc = correct / len(train_loader.dataset)
        test_acc = evaluate(model, test_loader)

        history['loss'].append(total_loss / len(train_loader))
        history['train_acc'].append(train_acc)
        history['test_acc'].append(test_acc)

        print(f'Epoch {epoch + 1:2d} | Loss: {total_loss / len(train_loader):.4f} | '
              f'Train Acc: {train_acc:.3f} | Test Acc: {test_acc:.3f} | '
              f'LR: {optimizer.param_groups[0]["lr"]:.2e}')

    # Visualization of the training curves.
    plt.figure(figsize=(12, 4))
    plt.subplot(1, 2, 1)
    plt.plot(history['loss'], label='Loss')
    plt.title('Training Loss')
    plt.subplot(1, 2, 2)
    plt.plot(history['train_acc'], label='Train Acc')
    plt.plot(history['test_acc'], label='Test Acc')
    plt.title('Accuracy')
    plt.legend()
    plt.tight_layout()
    plt.show()

    return history


def evaluate(model, loader):
    """Return the fraction of samples in ``loader`` that ``model`` classifies correctly."""
    model.eval()
    hits = 0
    with torch.no_grad():
        for inputs, targets in loader:
            preds = model(inputs).argmax(dim=1)
            hits += int((preds == targets).sum())
    return hits / len(loader.dataset)


if __name__ == '__main__':
    torch.manual_seed(42)  # reproducible init / shuffling
    # NOTE: benchmark=True trades exact reproducibility for cuDNN autotuned
    # kernels; it is a no-op on CPU.
    torch.backends.cudnn.benchmark = True

    # Configuration
    batch_size = 512
    # BUG FIX: was 10, but the three-stage schedule only enables cosine
    # annealing at epoch 10 — with 10 epochs stage 3 never ran. 15 matches
    # the 5+5+5 design (and train_enhanced's default).
    epochs = 15

    # Data loading
    train_loader, test_loader = load_data_mnist(batch_size)

    # Model initialization
    model = EnhancedNN()

    # Training
    train_enhanced(model, train_loader, test_loader, epochs)