import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import time
from collections import defaultdict

# Device configuration: prefer CUDA when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")


# 数据加载函数
def get_data_loaders(batch_size=128, data_dir='data', num_workers=0):
    """Build MNIST train/test DataLoaders.

    Args:
        batch_size: Samples per batch for both loaders.
        data_dir: Root directory where MNIST is stored / downloaded.
        num_workers: Worker subprocesses for data loading (0 = load in the
            main process, the previous implicit behavior).

    Returns:
        Tuple ``(train_loader, test_loader)``.
    """
    # Canonical MNIST normalization constants (dataset-wide mean / std).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir, train=True, download=True, transform=transform),
        batch_size=batch_size, shuffle=True, num_workers=num_workers
    )

    # Evaluation loader is unshuffled: sample order does not affect metrics.
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir, train=False, transform=transform),
        batch_size=batch_size, shuffle=False, num_workers=num_workers
    )

    return train_loader, test_loader


# Build the module-level train/test loaders used by the experiments below.
train_loader, test_loader = get_data_loaders(batch_size=128)


# 1. 首先定义AdvancedTrainer类
class AdvancedTrainer:
    """Training harness bundling the train/eval loops with several techniques:
    per-epoch metric history, learning-rate tracking, optional LR scheduling,
    early stopping with best-model checkpointing, and curve plotting."""

    def __init__(self, model, optimizer, criterion, device=None):
        """Store the training components and move the model to the device.

        Args:
            model: The nn.Module to train.
            optimizer: Optimizer already bound to ``model.parameters()``.
            criterion: Loss function taking ``(output, target)``.
            device: Target device; defaults to CUDA when available, else CPU.
        """
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.device = device if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)

        # Per-epoch history, consumed by plot_training_curves().
        self.train_losses = []
        self.train_accs = []
        self.test_losses = []
        self.test_accs = []
        self.lr_history = []

    def train_epoch(self, train_loader):
        """Run one training epoch.

        Returns:
            Tuple ``(mean_batch_loss, accuracy_percent)`` over the epoch.
        """
        self.model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        for data, target in train_loader:
            data, target = data.to(self.device), target.to(self.device)

            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()

            running_loss += loss.item()
            # Predicted class = argmax over the logits dimension.
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

        # Mean of per-batch losses (not sample-weighted across uneven batches).
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100. * correct / total

        return epoch_loss, epoch_acc

    def evaluate(self, test_loader):
        """Evaluate the model without gradient tracking.

        Returns:
            Tuple ``(mean_batch_loss, accuracy_percent)`` on ``test_loader``.
        """
        self.model.eval()
        test_loss = 0
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                test_loss += self.criterion(output, target).item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()

        test_loss /= len(test_loader)
        test_acc = 100. * correct / total

        return test_loss, test_acc

    def train(self, train_loader, test_loader, epochs, scheduler=None):
        """Full training loop with optional LR scheduling and early stopping.

        Saves the best-accuracy weights to 'best_model.pth' and stops after
        ``patience`` epochs without test-accuracy improvement.
        """
        best_acc = 0
        patience = 5
        patience_counter = 0

        print(
            f"{'Epoch':^6} | {'Train Loss':^10} | {'Train Acc':^10} | {'Test Loss':^10} | {'Test Acc':^10} | {'LR':^10}")
        print("-" * 80)

        for epoch in range(epochs):
            train_loss, train_acc = self.train_epoch(train_loader)
            test_loss, test_acc = self.evaluate(test_loader)

            # Capture the LR actually used this epoch, then advance the scheduler.
            current_lr = self.optimizer.param_groups[0]['lr']
            if scheduler:
                scheduler.step()

            # Record history for plotting.
            self.train_losses.append(train_loss)
            self.train_accs.append(train_acc)
            self.test_losses.append(test_loss)
            self.test_accs.append(test_acc)
            self.lr_history.append(current_lr)

            # BUG FIX: print the epoch row *before* the early-stop check, so the
            # final epoch's metrics are not silently dropped when training stops.
            print(
                f"{epoch + 1:^6} | {train_loss:^10.4f} | {train_acc:^10.2f}% | {test_loss:^10.4f} | {test_acc:^10.2f}% | {current_lr:^10.6f}")

            # Early stopping: checkpoint on improvement, otherwise count down.
            if test_acc > best_acc:
                best_acc = test_acc
                patience_counter = 0
                torch.save(self.model.state_dict(), 'best_model.pth')
            else:
                patience_counter += 1

            if patience_counter >= patience:
                print(f"早停在第 {epoch + 1} 轮")
                break

    def plot_training_curves(self, title="Training Curves"):
        """Plot loss, accuracy, LR schedule, and generalization gap (2x2 grid)."""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

        # Loss curves
        ax1.plot(self.train_losses, label='Training Loss')
        ax1.plot(self.test_losses, label='Test Loss')
        ax1.set_title(f'{title} - Loss')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.legend()
        ax1.grid(True)

        # Accuracy curves
        ax2.plot(self.train_accs, label='Training Accuracy')
        ax2.plot(self.test_accs, label='Test Accuracy')
        ax2.set_title(f'{title} - Accuracy')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Accuracy (%)')
        ax2.legend()
        ax2.grid(True)

        # Learning-rate schedule
        ax3.plot(self.lr_history, color='red')
        ax3.set_title('Learning Rate Schedule')
        ax3.set_xlabel('Epoch')
        ax3.set_ylabel('Learning Rate')
        ax3.grid(True)

        # Train-minus-test accuracy gap as an overfitting indicator
        gap = [train - test for train, test in zip(self.train_accs, self.test_accs)]
        ax4.plot(gap, color='purple')
        ax4.set_title('Generalization Gap (Train Acc - Test Acc)')
        ax4.set_xlabel('Epoch')
        ax4.set_ylabel('Accuracy Gap (%)')
        ax4.grid(True)

        plt.tight_layout()
        plt.show()


# 2. 定义ImprovedLeNet类
class ImprovedLeNet(nn.Module):
    """LeNet-style CNN for 1x28x28 inputs with a configurable activation,
    optional batch normalization, and dropout in the classifier head."""

    def __init__(self, activation='relu', use_batchnorm=False, dropout_rate=0.5):
        super(ImprovedLeNet, self).__init__()

        # Resolve the activation by name; unrecognized names fall back to ReLU.
        factories = {
            'relu': nn.ReLU,
            'sigmoid': nn.Sigmoid,
            'tanh': nn.Tanh,
            'leaky_relu': lambda: nn.LeakyReLU(0.1),
        }
        self.activation = factories.get(activation, nn.ReLU)()

        self.use_batchnorm = use_batchnorm
        self.dropout_rate = dropout_rate

        # Feature extractor: 1 -> 32 channels (padding keeps 28x28), then 32 -> 64.
        self.conv1 = nn.Conv2d(1, 32, 5, padding=2)
        self.conv2 = nn.Conv2d(32, 64, 5)

        # Batch-norm layers exist only when requested.
        if use_batchnorm:
            self.bn1 = nn.BatchNorm2d(32)
            self.bn2 = nn.BatchNorm2d(64)

        # Classifier head: flattened 64*5*5 feature map down to 10 class logits.
        self.fc1 = nn.Linear(64 * 5 * 5, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)

        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        # Block 1: conv -> (optional bn) -> activation -> 2x2 max pool.
        x = self.conv1(x)
        x = self.bn1(x) if self.use_batchnorm else x
        x = F.max_pool2d(self.activation(x), 2)

        # Block 2: same pattern, no padding so the map shrinks to 10x10 -> 5x5.
        x = self.conv2(x)
        x = self.bn2(x) if self.use_batchnorm else x
        x = F.max_pool2d(self.activation(x), 2)

        # Flatten, then run the hidden FC layers with dropout after each.
        x = x.view(x.size(0), -1)
        for fc in (self.fc1, self.fc2):
            x = self.dropout(self.activation(fc(x)))

        # Final layer emits raw logits (loss function applies softmax).
        return self.fc3(x)


# 3. 现在定义实验函数
def experiment_activation_functions():
    """Train one ImprovedLeNet per activation function and compare test accuracy.

    Returns:
        Dict keyed by activation name with per-epoch train/test accuracy
        histories and the final test accuracy.
    """
    candidates = ['sigmoid', 'tanh', 'relu', 'leaky_relu']
    results = {}

    for name in candidates:
        print(f"\n=== 测试激活函数: {name.upper()} ===")

        # Fresh model/optimizer/scheduler per activation for a fair comparison.
        net = ImprovedLeNet(activation=name, use_batchnorm=True)
        opt = optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-4)
        sched = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)

        runner = AdvancedTrainer(net, opt, nn.CrossEntropyLoss())
        runner.train(train_loader, test_loader, epochs=10, scheduler=sched)

        final_acc = runner.test_accs[-1] if runner.test_accs else 0
        results[name] = {
            'train_acc': runner.train_accs,
            'test_acc': runner.test_accs,
            'final_test_acc': final_acc,
        }

        runner.plot_training_curves(f"Activation: {name}")

    # Overlay every activation's test-accuracy curve in one comparison figure.
    plt.figure(figsize=(10, 6))
    for name in candidates:
        plt.plot(results[name]['test_acc'], label=name.upper())

    plt.xlabel('Epoch')
    plt.ylabel('Test Accuracy (%)')
    plt.title('Activation Functions Comparison')
    plt.legend()
    plt.grid(True)
    plt.show()

    return results


# 4. 运行实验
if __name__ == "__main__":
    # Quick two-epoch smoke test to verify the pipeline before the full run.
    print("=== 开始激活函数对比实验 ===")

    smoke_model = ImprovedLeNet(activation='relu', use_batchnorm=True)
    smoke_trainer = AdvancedTrainer(
        smoke_model,
        optim.Adam(smoke_model.parameters(), lr=0.001),
        nn.CrossEntropyLoss(),
    )
    smoke_trainer.train(train_loader, test_loader, epochs=2)

    print("基础测试完成，开始完整实验...")

    # Full activation-function comparison experiment.
    activation_results = experiment_activation_functions()

    # Summarize the final test accuracy achieved by each activation.
    print("\n=== 激活函数实验结果摘要 ===")
    for act_name, summary in activation_results.items():
        print(f"{act_name.upper()}: 最终测试准确率: {summary['final_test_acc']:.2f}%")