import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import time

# Fix RNG seeds (both PyTorch and NumPy) so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)


# 1. Data loading and preprocessing
def load_data():
    """Build MNIST train/test DataLoaders resized for AlexNet.

    Preprocessing pipeline:
      1. Resize each 28x28 digit to 224x224 (AlexNet's native input size).
      2. Convert to a float tensor scaled to [0, 1].
      3. Standardize with MNIST's channel mean and std.

    Returns:
        (train_loader, test_loader): DataLoaders with batch size 128; the
        training loader shuffles each epoch, the test loader does not.
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),  # AlexNet expects 224x224 inputs
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),  # MNIST mean / std
    ])

    # Download (if needed) and wrap the datasets.
    train_set = datasets.MNIST(
        root='./data', train=True, download=True, transform=preprocess)
    test_set = datasets.MNIST(
        root='./data', train=False, download=True, transform=preprocess)

    loader_batch = 128
    return (
        DataLoader(train_set, batch_size=loader_batch, shuffle=True),
        DataLoader(test_set, batch_size=loader_batch, shuffle=False),
    )


# 2. AlexNet adapted to MNIST (grayscale input, 10 classes, BatchNorm added)
class AlexNetMNIST(nn.Module):
    """AlexNet variant for MNIST.

    Differences from the original AlexNet: a single input channel
    (grayscale digits), a 10-way output layer, and batch normalization
    after every convolutional and fully-connected layer. Expects 224x224
    inputs, matching the Resize applied in load_data().
    """

    def __init__(self):
        super(AlexNetMNIST, self).__init__()

        # Convolutional feature extractor, built from a compact spec:
        # (in_ch, out_ch, kernel, stride, padding, pool_after).
        conv_specs = [
            (1, 64, 11, 4, 2, True),     # conv1 + 3x3/2 max-pool
            (64, 192, 5, 1, 2, True),    # conv2 + 3x3/2 max-pool
            (192, 384, 3, 1, 1, False),  # conv3
            (384, 256, 3, 1, 1, False),  # conv4
            (256, 256, 3, 1, 1, True),   # conv5 + 3x3/2 max-pool
        ]
        conv_layers = []
        for in_ch, out_ch, k, s, p, pool_after in conv_specs:
            conv_layers += [
                nn.Conv2d(in_ch, out_ch, kernel_size=k, stride=s, padding=p),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            ]
            if pool_after:
                conv_layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
        self.features = nn.Sequential(*conv_layers)

        # Fully-connected head: two Dropout+Linear+BN+ReLU stages, then the
        # 10-way output layer. 256*6*6 is the flattened feature-map size
        # after the pools above (for 224x224 inputs).
        head = []
        for in_features, out_features in ((256 * 6 * 6, 4096), (4096, 4096)):
            head += [
                nn.Dropout(0.5),  # classic AlexNet overfitting guard
                nn.Linear(in_features, out_features),
                nn.BatchNorm1d(out_features),
                nn.ReLU(inplace=True),
            ]
        head.append(nn.Linear(4096, 10))  # one logit per digit class
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input images x."""
        feature_maps = self.features(x)
        flat = torch.flatten(feature_maps, 1)
        return self.classifier(flat)


# 3. A small baseline CNN used for comparison against AlexNet
class CustomNet(nn.Module):
    """Two-conv-layer baseline network.

    Expects the same 224x224 grayscale inputs as AlexNetMNIST; after the
    two 2x2 max-pools the 64-channel feature map is 56x56, which fixes
    the classifier's input size (64 * 56 * 56).
    """

    def __init__(self):
        super(CustomNet, self).__init__()

        def conv_stage(in_ch, out_ch, kernel, pad):
            # Conv -> BN -> ReLU -> 2x2 max-pool (halves spatial size).
            return [
                nn.Conv2d(in_ch, out_ch, kernel_size=kernel, stride=1,
                          padding=pad),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]

        self.features = nn.Sequential(
            *conv_stage(1, 32, 5, 2),
            *conv_stage(32, 64, 3, 1),
        )
        self.classifier = nn.Sequential(
            nn.Linear(64 * 56 * 56, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input images x."""
        return self.classifier(torch.flatten(self.features(x), 1))


# 4. Training loop for one epoch (with optional explicit L2 penalty)
def train(model, device, train_loader, optimizer, epoch, l2_lambda=0.0001):
    """Train ``model`` for one epoch.

    Args:
        model: network to optimize (already moved to ``device`` by caller).
        device: torch device to run on.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping ``model``'s parameters.
        epoch: current epoch index (used only for progress printing).
        l2_lambda: coefficient of the explicit L2 penalty
            (lambda * sum of squared parameter norms); 0 disables it.
            NOTE(review): the optimizers built in train_and_evaluate()
            also set ``weight_decay``, so regularization is applied twice
            when both are non-zero — confirm that is intended.

    Returns:
        (avg_loss, accuracy): mean per-sample loss (including the L2
        term) and training accuracy in percent.
    """
    model.train()
    total_loss = 0
    correct = 0
    total_samples = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        # Forward pass.
        output = model(data)

        # Data term: mean cross-entropy over the batch.
        loss = nn.functional.cross_entropy(output, target)

        # Explicit L2 (Tikhonov) regularization: lambda * sum ||w||^2.
        # BUG FIX: the previous code accumulated torch.norm(param) — the
        # plain norms, not their squares — which is not standard L2
        # weight decay and scales differently with parameter magnitude.
        if l2_lambda > 0:
            l2_reg = sum(p.pow(2).sum() for p in model.parameters())
            loss = loss + l2_lambda * l2_reg

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        # Running statistics (loss weighted by batch size for a true mean).
        total_loss += loss.item() * data.size(0)
        _, predicted = output.max(1)
        correct += predicted.eq(target).sum().item()
        total_samples += data.size(0)

        # Progress report every 100 batches.
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch} | Batch: {batch_idx}/{len(train_loader)} | '
                  f'Loss: {loss.item():.4f}')

    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 5. 测试函数
def test(model, device, test_loader):
    """
    测试模型性能
    参数:
        model: 要测试的模型
        device: 测试设备 (CPU/GPU)
        test_loader: 测试数据加载器
    返回:
        test_loss: 平均测试损失
        accuracy: 测试准确率百分比
    """
    model.eval()
    total_loss = 0
    correct = 0
    total_samples = len(test_loader.dataset)

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)

            # 前向传播
            output = model(data)

            # 计算损失
            total_loss += nn.functional.cross_entropy(
                output, target, reduction='sum').item()

            # 统计正确预测数
            _, predicted = output.max(1)
            correct += predicted.eq(target).sum().item()

    # 计算平均损失和准确率
    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 6. Plot loss and accuracy curves for both models
def plot_training_curves(alexnet_stats, custom_stats, save_path="results"):
    """Draw train/test loss and accuracy curves and save them to disk.

    Args:
        alexnet_stats: dict of per-epoch lists with keys 'train_loss',
            'test_loss', 'train_acc', 'test_acc' for the AlexNet model.
        custom_stats: same structure for the custom network.
        save_path: output directory (created if it does not exist).

    Saves 'training_curves.png' and 'training_curves.pdf' under save_path.
    """
    # (stats, matplotlib color code, legend prefix) for each model.
    series = ((alexnet_stats, 'b', 'AlexNet'), (custom_stats, 'r', 'Custom'))

    plt.figure(figsize=(14, 6))

    # Left panel: loss (solid = train, dashed = test).
    plt.subplot(1, 2, 1)
    for stats, color, label in series:
        plt.plot(stats['train_loss'], color + '-', label=f'{label} Train')
        plt.plot(stats['test_loss'], color + '--', label=f'{label} Test')
    plt.title('Training and Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: accuracy.
    plt.subplot(1, 2, 2)
    for stats, color, label in series:
        plt.plot(stats['train_acc'], color + '-', label=f'{label} Train')
        plt.plot(stats['test_acc'], color + '--', label=f'{label} Test')
    plt.title('Training and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()

    # Persist both a raster and a vector copy.
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, 'training_curves.png'))
    plt.savefig(os.path.join(save_path, 'training_curves.pdf'))
    plt.close()


# 7. Top-level training / evaluation driver
def train_and_evaluate():
    """Train AlexNetMNIST and CustomNet side by side on MNIST,
    report per-epoch metrics, and plot the resulting curves."""
    # Prefer the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    train_loader, test_loader = load_data()

    # The two networks under comparison.
    models = {
        "AlexNet": AlexNetMNIST().to(device),
        "Custom": CustomNet().to(device),
    }

    # Per-model learning rates: the deeper AlexNet gets the smaller one.
    learning_rates = {"AlexNet": 0.0001, "Custom": 0.001}
    optimizers = {
        name: optim.Adam(net.parameters(), lr=learning_rates[name],
                         weight_decay=1e-5)
        for name, net in models.items()
    }

    epochs = 15
    l2_lambda = 0.0001  # explicit L2 penalty coefficient passed to train()

    # Per-model history of the four tracked metrics.
    metric_keys = ('train_loss', 'test_loss', 'train_acc', 'test_acc')
    stats = {name: {key: [] for key in metric_keys} for name in models}

    start_time = time.time()

    for epoch in range(1, epochs + 1):
        epoch_start = time.time()

        for name, net in models.items():
            # One pass over the training set, then a full evaluation.
            train_loss, train_acc = train(
                net, device, train_loader, optimizers[name], epoch, l2_lambda)
            test_loss, test_acc = test(net, device, test_loader)

            for key, value in zip(metric_keys,
                                  (train_loss, test_loss, train_acc, test_acc)):
                stats[name][key].append(value)

            print(f"\n{name} | Epoch {epoch}/{epochs} | "
                  f"Train Loss: {train_loss:.4f} | Test Loss: {test_loss:.4f} | "
                  f"Train Acc: {train_acc:.2f}% | Test Acc: {test_acc:.2f}%")

        epoch_time = time.time() - epoch_start
        print(f"Epoch time: {epoch_time:.1f} seconds | "
              f"Total time: {time.time() - start_time:.1f} seconds")
        print("-" * 80)

    total_time = time.time() - start_time
    print(f"\nTraining completed in {total_time:.1f} seconds")

    # 8. Plot the learning curves for both models.
    plot_training_curves(stats["AlexNet"], stats["Custom"])

    # 9. Summarize the best test accuracy each model reached.
    print("\nFinal Performance Comparison:")
    for name in models:
        best_acc = max(stats[name]['test_acc'])
        print(f"{name} - Best Test Accuracy: {best_acc:.2f}%")


# Run the full training/evaluation pipeline when executed as a script.
if __name__ == '__main__':
    train_and_evaluate()