import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
import os

# Fix the random seeds so runs are reproducible.
torch.manual_seed(42)
np.random.seed(42)

# 1. Data loading and preprocessing
transform = transforms.Compose([
    transforms.ToTensor(),                     # convert to Tensor, scaled to [0, 1]
    transforms.Normalize((0.1307,), (0.3081,)) # MNIST per-channel mean and std
])

# Download (if needed) and load the MNIST dataset.
train_dataset = datasets.MNIST(
    root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(
    root='./data', train=False, download=True, transform=transform)

# Create the data loaders; only the training set is shuffled.
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# 2. 实现LeNet-5模型（带BN层）
class LeNet5(nn.Module):
    """LeNet-5 with BatchNorm layers, adapted to 28x28 MNIST input.

    The first conv uses padding=2 so the spatial flow is
    28 -> 28 -> 14 -> 10 -> 5, giving 16*5*5 features before the classifier.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # Feature extractor: two conv/BN/ReLU/avg-pool stages
        # (the original paper uses average pooling).
        conv_stages = [
            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(6),
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=2, stride=2),
        ]
        self.features = nn.Sequential(*conv_stages)

        # Classifier head: two hidden FC layers with BN, then 10 logits.
        fc_stages = [
            nn.Linear(16 * 5 * 5, 120),
            nn.BatchNorm1d(120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.BatchNorm1d(84),
            nn.ReLU(),
            nn.Linear(84, 10),
        ]
        self.classifier = nn.Sequential(*fc_stages)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten all dims but batch
        return self.classifier(flat)

# 3. 实现自定义网络（简化版LeNet）
class CustomNet(nn.Module):
    """A small two-conv CNN for MNIST (simplified LeNet) with BN and Dropout.

    Spatial flow for 28x28 input: 28 -> 24 -> 12 -> 8 -> 4, so the flattened
    feature vector is 20 * 4 * 4 = 320.
    """

    def __init__(self):
        super(CustomNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.bn1 = nn.BatchNorm2d(10)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.bn2 = nn.BatchNorm2d(20)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        # Dropout regularization on the hidden FC layer (active in train mode only).
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        F = nn.functional
        # Stage 1: conv -> BN -> 2x2 max-pool -> ReLU
        x = F.max_pool2d(self.bn1(self.conv1(x)), 2)
        x = F.relu(x)
        # Stage 2: conv -> BN -> 2x2 max-pool -> ReLU
        x = F.max_pool2d(self.bn2(self.conv2(x)), 2)
        x = F.relu(x)
        # Flatten and classify.
        x = x.view(-1, 320)
        x = self.dropout(F.relu(self.fc1(x)))
        return self.fc2(x)

# 4. 训练和测试函数
def train(model, device, train_loader, optimizer, epoch, l2_lambda=0.001):
    """Run one training epoch.

    Args:
        model: the network to train (set to train mode here).
        device: torch.device to move batches to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping model.parameters().
        epoch: current epoch number (kept for interface compatibility; unused).
        l2_lambda: L2 regularization coefficient; 0 disables the penalty.

    Returns:
        (avg_loss, accuracy): mean per-batch loss (incl. L2 term) and
        accuracy in percent over the whole training set.
    """
    model.train()
    train_loss = 0.0
    correct = 0
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nn.functional.cross_entropy(output, target)

        # L2 regularization: lambda * sum of *squared* parameter norms.
        # (The previous version summed unsquared torch.norm(param), which
        # is not the standard L2/weight-decay penalty.)
        if l2_lambda:
            l2_reg = sum(p.pow(2.0).sum() for p in model.parameters())
            loss = loss + l2_lambda * l2_reg

        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()

    # Bug fix: `loss` is already a per-batch mean, so average over the number
    # of batches. The old code divided by len(train_loader.dataset), which
    # understated the loss by roughly a factor of batch_size and made it
    # incomparable with the per-sample loss reported by test().
    train_loss /= len(train_loader)
    accuracy = 100. * correct / len(train_loader.dataset)
    return train_loss, accuracy

def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += nn.functional.cross_entropy(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    return test_loss, accuracy

# 5. 主函数
def main():
    """Train LeNet-5 and CustomNet on MNIST, plot curves, and compare results."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create the two models to compare.
    lenet_model = LeNet5().to(device)
    custom_model = CustomNet().to(device)

    # One Adam optimizer per model.
    lenet_optimizer = optim.Adam(lenet_model.parameters(), lr=0.001)
    custom_optimizer = optim.Adam(custom_model.parameters(), lr=0.001)

    # Training hyper-parameters.
    epochs = 15
    l2_lambda = 0.001  # L2 regularization coefficient

    # Per-epoch history for both models.
    lenet_train_losses, lenet_test_losses = [], []
    lenet_train_acc, lenet_test_acc = [], []
    custom_train_losses, custom_test_losses = [], []
    custom_train_acc, custom_test_acc = [], []

    # Train/test loop.
    for epoch in range(1, epochs + 1):
        # LeNet train and test. Use distinct variable names per model:
        # the old code reused one set of variables and the epoch summary
        # printed CustomNet's numbers on the "LeNet" line.
        lenet_tr_loss, lenet_tr_acc = train(
            lenet_model, device, train_loader, lenet_optimizer, epoch, l2_lambda)
        lenet_te_loss, lenet_te_acc = test(lenet_model, device, test_loader)
        lenet_train_losses.append(lenet_tr_loss)
        lenet_test_losses.append(lenet_te_loss)
        lenet_train_acc.append(lenet_tr_acc)
        lenet_test_acc.append(lenet_te_acc)

        # CustomNet train and test.
        custom_tr_loss, custom_tr_acc = train(
            custom_model, device, train_loader, custom_optimizer, epoch, l2_lambda)
        custom_te_loss, custom_te_acc = test(custom_model, device, test_loader)
        custom_train_losses.append(custom_tr_loss)
        custom_test_losses.append(custom_te_loss)
        custom_train_acc.append(custom_tr_acc)
        custom_test_acc.append(custom_te_acc)

        print(f'Epoch {epoch}:')
        print(f'LeNet - Train Loss: {lenet_tr_loss:.4f}, Test Loss: {lenet_te_loss:.4f} | Train Acc: {lenet_tr_acc:.2f}%, Test Acc: {lenet_te_acc:.2f}%')
        print(f'Custom - Train Loss: {custom_tr_loss:.4f}, Test Loss: {custom_te_loss:.4f} | Train Acc: {custom_tr_acc:.2f}%, Test Acc: {custom_te_acc:.2f}%')
        print('-' * 60)

    # 6. Visualize the results.
    plt.figure(figsize=(12, 5))

    # Loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(lenet_train_losses, label='LeNet Train Loss')
    plt.plot(lenet_test_losses, label='LeNet Test Loss')
    plt.plot(custom_train_losses, '--', label='Custom Train Loss')
    plt.plot(custom_test_losses, '--', label='Custom Test Loss')
    plt.title('Training and Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()

    # Accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(lenet_train_acc, label='LeNet Train Acc')
    plt.plot(lenet_test_acc, label='LeNet Test Acc')
    plt.plot(custom_train_acc, '--', label='Custom Train Acc')
    plt.plot(custom_test_acc, '--', label='Custom Test Acc')
    plt.title('Training and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()

    # Save the figure (exist_ok avoids the check-then-create race).
    os.makedirs('results', exist_ok=True)
    plt.savefig('results/training_curves.png')
    plt.show()

    # 7. Final performance comparison.
    print('\nFinal Performance Comparison:')
    print(f'LeNet-5 Test Accuracy: {lenet_test_acc[-1]:.2f}%')
    print(f'CustomNet Test Accuracy: {custom_test_acc[-1]:.2f}%')

# Run the full train/evaluate/plot pipeline only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    main()