import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F

# Seed both torch and numpy RNGs so runs are reproducible.
torch.manual_seed(42)
np.random.seed(42)


# 1. 数据预处理和加载
def load_data(batch_size=256, data_root='./data'):
    """Load the MNIST train/test sets and wrap them in DataLoaders.

    Preprocessing:
      1. Resize to 32x32 (the VGG-style network expects 32x32 inputs).
      2. Convert to a tensor in [0, 1].
      3. Normalize with MNIST's mean/std (0.1307 / 0.3081).

    Args:
        batch_size: mini-batch size for both loaders (default 256, as before).
        data_root: directory where MNIST is stored / downloaded to.

    Returns:
        (train_loader, test_loader) tuple of DataLoaders.
    """
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    # Download (if needed) and load the datasets.
    train_data = datasets.MNIST(
        root=data_root, train=True, download=True, transform=transform)
    test_data = datasets.MNIST(
        root=data_root, train=False, download=True, transform=transform)

    # Extra workers / pinned memory only pay off when feeding a GPU.
    use_cuda = torch.cuda.is_available()
    num_workers = 4 if use_cuda else 2
    pin_memory = use_cuda

    train_loader = DataLoader(
        train_data, batch_size=batch_size, shuffle=True,
        num_workers=num_workers, pin_memory=pin_memory,
    )
    test_loader = DataLoader(
        test_data, batch_size=batch_size, shuffle=False,
        num_workers=num_workers, pin_memory=pin_memory,
    )

    return train_loader, test_loader


# 2. 实现VGG风格网络 - 轻量级版本适用于MNIST
class VGGStyleNet(nn.Module):
    """Compact VGG-style CNN for 32x32 grayscale MNIST images.

    Three double-conv blocks (each conv followed by BatchNorm + ReLU, each
    block ending in a 2x2 max-pool) feed a two-hidden-layer classifier with
    BatchNorm and Dropout regularization.
    """

    def __init__(self, num_classes=10):
        super(VGGStyleNet, self).__init__()

        def double_conv(in_ch, out_ch):
            # Two 3x3 convs (padding=1 keeps spatial size), then halve with max-pool.
            return [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]

        # Spatial path: 32x32 -> 16x16 -> 8x8 -> 4x4; channels: 1 -> 32 -> 64 -> 128.
        self.features = nn.Sequential(
            *double_conv(1, 32),
            *double_conv(32, 64),
            *double_conv(64, 128),
        )

        # Classifier head over the flattened 128 * 4 * 4 = 2048 feature vector.
        self.classifier = nn.Sequential(
            nn.Linear(128 * 4 * 4, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(128, num_classes),
        )

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-normal for convs, unit/zero for BN, small normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm1d)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Extract features, flatten, classify; returns raw logits."""
        feats = self.features(x)
        flat = torch.flatten(feats, 1)
        return self.classifier(flat)


# 3. 实现对比用的自定义网络
class CustomNet(nn.Module):
    """Smaller baseline CNN used for comparison against VGGStyleNet."""

    def __init__(self, num_classes=10):
        super(CustomNet, self).__init__()

        feature_layers = [
            # 5x5 conv keeps 32x32 (padding=2); pool halves to 16x16.
            nn.Conv2d(1, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 3x3 conv keeps 16x16; pool halves to 8x8.
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)

        classifier_layers = [
            # Flattened feature size: 64 channels * 8 * 8 = 4096.
            nn.Linear(64 * 8 * 8, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(128, num_classes),
        ]
        self.classifier = nn.Sequential(*classifier_layers)

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-normal for convs, unit/zero for BN, small normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm1d)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Extract features, flatten, classify; returns raw logits."""
        out = self.features(x)
        out = out.flatten(1)
        return self.classifier(out)


# 4. 训练函数（含L2正则化）
def train(model, device, train_loader, optimizer, epoch, l2_lambda=0.0001):
    """Train the model for one epoch.

    Args:
        model: the network to train (updated in place).
        device: torch.device to run on (CPU/GPU).
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer over model.parameters().
        epoch: current epoch number (used for progress logging only).
        l2_lambda: coefficient of the explicit L2 penalty; 0 disables it.
            NOTE(review): if the optimizer already uses weight_decay, this
            double-regularizes — confirm that is intended.

    Returns:
        (avg_loss, accuracy): mean per-sample training loss (including the
        L2 term) and training accuracy as a percentage.
    """
    model.train()
    total_loss = 0.0
    correct = 0
    total_samples = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        # Move the batch to the target device.
        data = data.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        optimizer.zero_grad()

        # Forward pass and data loss.
        output = model(data)
        loss = F.cross_entropy(output, target)

        # L2 regularization: lambda * sum of SQUARED parameter norms.
        # (The original summed unsquared norms via torch.norm(param), which
        # is not L2 — its gradient is w/||w|| instead of proportional to w.)
        if l2_lambda > 0:
            l2_reg = sum(p.pow(2).sum() for p in model.parameters())
            loss = loss + l2_lambda * l2_reg

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()

        # Accumulate per-sample loss and accuracy statistics.
        batch_size = data.size(0)
        total_loss += loss.item() * batch_size
        predicted = output.argmax(dim=1)
        batch_correct = predicted.eq(target).sum().item()
        correct += batch_correct
        total_samples += batch_size

        # Report progress every 100 batches (skipping batch 0).
        if batch_idx % 100 == 0 and batch_idx > 0:
            batch_acc = 100. * batch_correct / batch_size
            print(f'Epoch: {epoch} | Batch: {batch_idx}/{len(train_loader)} | '
                  f'Loss: {loss.item():.4f} | Acc: {batch_acc:.2f}%')

    # Epoch-level averages.
    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 5. 测试函数
def test(model, device, test_loader):
    """
    测试模型性能
    参数:
        model: 要测试的模型
        device: 测试设备 (CPU/GPU)
        test_loader: 测试数据加载器
    返回:
        test_loss: 平均测试损失
        accuracy: 测试准确率百分比
    """
    model.eval()
    total_loss = 0
    correct = 0
    total_samples = len(test_loader.dataset)

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += F.cross_entropy(output, target, reduction='sum').item()
            _, predicted = output.max(1)
            correct += predicted.eq(target).sum().item()

    # 计算平均损失和准确率
    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 6. 绘制训练曲线
def plot_training_curves(vgg_stats, custom_stats, save_path="results"):
    """Plot train/test loss and accuracy curves for both models; save a PNG.

    Args:
        vgg_stats: dict with 'train_loss', 'test_loss', 'train_acc',
            'test_acc' lists (one entry per epoch) for VGGStyleNet.
        custom_stats: same-shaped dict for CustomNet.
        save_path: output directory; created if it does not exist.
    """
    plt.figure(figsize=(14, 6))

    # Left panel: loss curves (solid = train, dashed = test).
    plt.subplot(1, 2, 1)
    plt.plot(vgg_stats['train_loss'], 'b-', label='VGG-Style Train')
    plt.plot(vgg_stats['test_loss'], 'b--', label='VGG-Style Test')
    plt.plot(custom_stats['train_loss'], 'r-', label='Custom Train')
    plt.plot(custom_stats['test_loss'], 'r--', label='Custom Test')
    plt.title('Training and Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Right panel: accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(vgg_stats['train_acc'], 'b-', label='VGG-Style Train')
    plt.plot(vgg_stats['test_acc'], 'b--', label='VGG-Style Test')
    plt.plot(custom_stats['train_acc'], 'r-', label='Custom Train')
    plt.plot(custom_stats['test_acc'], 'r--', label='Custom Test')
    plt.title('Training and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()

    # Save the figure. exist_ok avoids the check-then-create race, and the
    # message reports the ACTUAL path (the original hardcoded "results/..."
    # even when save_path differed).
    os.makedirs(save_path, exist_ok=True)
    out_file = os.path.join(save_path, 'vgg_training_curves.png')
    plt.savefig(out_file, dpi=300)
    plt.close()
    print(f"训练曲线已保存到 {out_file}")


# 7. 主训练函数
def train_and_evaluate():
    """Train VGGStyleNet and CustomNet side by side on MNIST and compare.

    For each epoch: train both models, evaluate on the test set, step a
    plateau LR scheduler on the test loss, and record statistics. Afterwards
    the training curves are plotted and a final accuracy summary is printed.
    """
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    print("加载数据中...")
    train_loader, test_loader = load_data()

    # One model / optimizer / scheduler per architecture, keyed by name.
    models = {
        "VGGStyle": VGGStyleNet().to(device),
        "Custom": CustomNet().to(device),
    }
    optimizers = {
        name: optim.Adam(m.parameters(), lr=0.001, weight_decay=1e-4)
        for name, m in models.items()
    }
    schedulers = {
        name: ReduceLROnPlateau(
            opt, mode='min', factor=0.5, patience=2, verbose=True
        )
        for name, opt in optimizers.items()
    }

    epochs = 15
    print(f"开始训练，共{epochs}个epoch...")

    # Per-model history: one list entry per epoch for each metric.
    stats = {
        name: {'train_loss': [], 'test_loss': [], 'train_acc': [], 'test_acc': []}
        for name in models
    }

    start_time = time.time()

    for epoch in range(1, epochs + 1):
        epoch_start = time.time()
        print(f"\nEpoch {epoch}/{epochs}:")

        for name, model in models.items():
            # One training pass, then a full test-set evaluation.
            train_loss, train_acc = train(
                model, device, train_loader, optimizers[name], epoch
            )
            test_loss, test_acc = test(model, device, test_loader)

            # Reduce the learning rate when the test loss plateaus.
            schedulers[name].step(test_loss)

            history = stats[name]
            history['train_loss'].append(train_loss)
            history['train_acc'].append(train_acc)
            history['test_loss'].append(test_loss)
            history['test_acc'].append(test_acc)

            print(f"  {name}模型:")
            print(f"    训练损失: {train_loss:.4f}, 测试损失: {test_loss:.4f}")
            print(f"    训练准确率: {train_acc:.2f}%, 测试准确率: {test_acc:.2f}%")

        epoch_time = time.time() - epoch_start
        total_time = time.time() - start_time
        print(f"  本epoch耗时: {epoch_time:.1f}秒 | 累计时间: {total_time / 60:.1f}分钟")
        print("-" * 80)

    # Plot both models' curves side by side.
    print("\n绘制训练曲线中...")
    plot_training_curves(stats["VGGStyle"], stats["Custom"])

    # Final head-to-head accuracy summary.
    print("\n最终性能比较:")
    for name, history in stats.items():
        print(f"  {name}模型 - 最高测试准确率: {max(history['test_acc']):.2f}%")
        print(f"  {name}模型 - 最终测试准确率: {history['test_acc'][-1]:.2f}%")


if __name__ == '__main__':
    # Run the full experiment and report total wall-clock time in minutes.
    started = time.time()
    train_and_evaluate()
    elapsed = time.time() - started
    print(f"\n总训练时间: {elapsed / 60:.1f}分钟")