import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from time import time

# Fix random seeds for reproducibility
torch.manual_seed(42)
np.random.seed(42)

# 1. Data preprocessing and loading
transform = transforms.Compose([
    transforms.Resize((32, 32)),  # upscale 28x28 MNIST so it survives this network's repeated stride-2 downsampling (canonical GoogLeNet uses 224x224)
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # standard MNIST mean / std
])

# Load the MNIST datasets (downloaded to ./data on first run; note this executes at import time)
train_dataset = MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = MNIST(root='./data', train=False, download=True, transform=transform)

# Batched loaders: training set reshuffled each epoch, test set kept in order
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=2)


# 2. GoogLeNet's Inception module
def _conv_bn_relu(in_ch, out_ch, **conv_kwargs):
    """Return the [Conv2d, BatchNorm2d, ReLU] layer triple used by every branch.

    Returned as a plain list (not an nn.Sequential) so branches composed of
    several triples keep a flat module index layout — identical structure and
    state_dict keys to spelling the layers out by hand.
    """
    return [
        nn.Conv2d(in_ch, out_ch, **conv_kwargs),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(inplace=True),
    ]


class Inception(nn.Module):
    """GoogLeNet Inception module (Szegedy et al., 2014), with BatchNorm added.

    Runs four parallel branches over the same input and concatenates their
    outputs along the channel dimension. All branches preserve spatial size,
    so the output has ch1x1 + ch3x3 + ch5x5 + pool_proj channels.
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()

        # Branch 1: plain 1x1 convolution
        self.branch1 = nn.Sequential(*_conv_bn_relu(in_channels, ch1x1, kernel_size=1))

        # Branch 2: 1x1 channel reduction followed by 3x3 convolution
        self.branch2 = nn.Sequential(
            *_conv_bn_relu(in_channels, ch3x3red, kernel_size=1),
            *_conv_bn_relu(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 channel reduction followed by 5x5 convolution
        self.branch3 = nn.Sequential(
            *_conv_bn_relu(in_channels, ch5x5red, kernel_size=1),
            *_conv_bn_relu(ch5x5red, ch5x5, kernel_size=5, padding=2),
        )

        # Branch 4: 3x3 max-pool (stride 1 + padding, so no downsampling) then 1x1 projection
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            *_conv_bn_relu(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        """Concatenate the four branch outputs along the channel axis (dim 1)."""
        return torch.cat([self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)], 1)


# 3. Simplified GoogLeNet (adapted to MNIST's 1-channel input)
class GoogLeNet(nn.Module):
    """Simplified GoogLeNet for 1-channel 32x32 inputs (e.g. upscaled MNIST).

    Keeps the original paper's Inception stage widths (3a-5b) but replaces the
    224x224 stem with one sized for 32x32 inputs and omits the auxiliary
    classifiers.
    """

    def __init__(self, num_classes=10):
        super(GoogLeNet, self).__init__()

        # Stem part 1: 7x7 stride-2 conv + stride-2 pool (32x32 -> 8x8, 64 channels)
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # Stem part 2: 1x1 then 3x3 conv + stride-2 pool (-> 192 channels)
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # Stage 3 Inception blocks: 192 -> 256 -> 480 channels
        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Stage 4 Inception blocks: 480 -> ... -> 832 channels
        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Stage 5 Inception blocks: 832 -> 1024 channels
        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)

        # Head: global average pool, dropout, linear classifier
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.4)
        self.fc = nn.Linear(1024, num_classes)

    def forward(self, x):
        """Map a (N, 1, 32, 32) batch to (N, num_classes) logits."""
        stages = (
            self.conv1, self.conv2,
            self.inception3a, self.inception3b, self.maxpool3,
            self.inception4a, self.inception4b, self.inception4c,
            self.inception4d, self.inception4e, self.maxpool4,
            self.inception5a, self.inception5b,
        )
        for stage in stages:
            x = stage(x)

        # (N, 1024, 2, 2) -> pooled and flattened to (N, 1024) -> logits
        x = torch.flatten(self.avgpool(x), 1)
        return self.fc(self.dropout(x))


# 4. A simple CNN as a baseline for comparison
class SimpleCNN(nn.Module):
    """Three-stage Conv-BN-ReLU-pool baseline CNN for 1-channel 32x32 inputs."""

    def __init__(self, num_classes=10):
        super(SimpleCNN, self).__init__()

        # Build the three identical conv stages (32 -> 64 -> 128 channels);
        # layer order matches writing them out, so state_dict keys are unchanged.
        layers = []
        in_ch = 1
        for out_ch in (32, 64, 128):
            layers += [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]
            in_ch = out_ch
        self.features = nn.Sequential(*layers)

        # Three stride-2 pools reduce 32x32 to 4x4, giving 128*4*4 flat features
        self.classifier = nn.Sequential(
            nn.Linear(128 * 4 * 4, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Map a (N, 1, 32, 32) batch to (N, num_classes) logits."""
        feats = self.features(x)
        return self.classifier(torch.flatten(feats, 1))


# 5. Training and evaluation functions
def train(model, device, train_loader, optimizer, criterion, epoch, l1_lambda=0.0001):
    """Run one training epoch and return (average loss, accuracy in percent).

    Args:
        model: network to train (switched to train mode here).
        device: torch.device that inputs and targets are moved to.
        train_loader: iterable of (inputs, targets) batches.
        optimizer: optimizer stepping the model's parameters.
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        epoch: epoch number, used only in the progress printout.
        l1_lambda: weight of the L1 penalty added to the loss; 0 disables it.

    Returns:
        (avg_loss, acc): mean per-batch loss and accuracy in percent. NOTE:
        avg_loss includes the L1 penalty term, so it is not directly
        comparable to the test loss reported by test().
    """
    model.train()
    train_loss = 0.0
    correct = 0
    total = 0

    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # L1 regularization over all parameters (including BatchNorm affine
        # terms); non-in-place add keeps the criterion output untouched.
        if l1_lambda > 0:
            loss = loss + l1_lambda * sum(p.abs().sum() for p in model.parameters())

        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        predicted = outputs.argmax(dim=1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

    acc = 100. * correct / total
    avg_loss = train_loss / len(train_loader)
    print(f'Train Epoch: {epoch} | Loss: {avg_loss:.4f} | Acc: {acc:.2f}%')
    return avg_loss, acc


def test(model, device, test_loader, criterion):
    """Evaluate the model on test_loader; return (average loss, accuracy %)."""
    model.eval()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0

    # Inference only: eval mode plus no_grad to skip autograd bookkeeping
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            logits = model(inputs)
            running_loss += criterion(logits, targets).item()

            preds = logits.max(1)[1]
            n_seen += targets.size(0)
            n_correct += (preds == targets).sum().item()

    acc = 100. * n_correct / n_seen
    avg_loss = running_loss / len(test_loader)
    print(f'Test Loss: {avg_loss:.4f} | Acc: {acc:.2f}%')
    return avg_loss, acc


# 6. Main entry point
def _fit(model, optimizer, criterion, device, epochs, label):
    """Train `model` for `epochs` epochs against the module-level loaders.

    Prints the same progress/timing lines as before and returns the per-epoch
    history as four lists: (train_loss, train_acc, test_loss, test_acc).
    """
    print(f"\nTraining {label}...")
    start_time = time()
    history = ([], [], [], [])
    for epoch in range(1, epochs + 1):
        tr_loss, tr_acc = train(model, device, train_loader, optimizer, criterion, epoch)
        te_loss, te_acc = test(model, device, test_loader, criterion)
        for series, value in zip(history, (tr_loss, tr_acc, te_loss, te_acc)):
            series.append(value)
    print(f"{label} training time: {time() - start_time:.2f} seconds")
    return history


def _plot_curve(position, title, ylabel, series):
    """Draw one subplot (1x2 grid, `position` 1 or 2) of labeled line series."""
    plt.subplot(1, 2, position)
    for values, label in series:
        plt.plot(values, label=label)
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    plt.legend()
    plt.grid(True)


def main():
    """Train GoogLeNet and SimpleCNN on MNIST, plot curves, report accuracy."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Build both models on the chosen device
    googlenet = GoogLeNet().to(device)
    simple_cnn = SimpleCNN().to(device)

    # Cross-entropy loss; L2 regularization via the optimizers' weight_decay
    criterion = nn.CrossEntropyLoss()
    optimizer_googlenet = optim.Adam(googlenet.parameters(), lr=0.001, weight_decay=1e-5)
    optimizer_simple = optim.Adam(simple_cnn.parameters(), lr=0.001, weight_decay=1e-5)

    epochs = 15

    # Train both models, collecting per-epoch metrics
    g_train_loss, g_train_acc, g_test_loss, g_test_acc = _fit(
        googlenet, optimizer_googlenet, criterion, device, epochs, "GoogLeNet")
    s_train_loss, s_train_acc, s_test_loss, s_test_acc = _fit(
        simple_cnn, optimizer_simple, criterion, device, epochs, "SimpleCNN")

    # 7. Visualize accuracy and loss curves side by side
    plt.figure(figsize=(12, 5))
    _plot_curve(1, 'Accuracy vs Epoch', 'Accuracy (%)', [
        (g_train_acc, 'GoogLeNet Train'),
        (g_test_acc, 'GoogLeNet Test'),
        (s_train_acc, 'SimpleCNN Train'),
        (s_test_acc, 'SimpleCNN Test'),
    ])
    _plot_curve(2, 'Loss vs Epoch', 'Loss', [
        (g_train_loss, 'GoogLeNet Train'),
        (g_test_loss, 'GoogLeNet Test'),
        (s_train_loss, 'SimpleCNN Train'),
        (s_test_loss, 'SimpleCNN Test'),
    ])
    plt.tight_layout()
    plt.savefig('training_curves.png')  # persist the figure alongside showing it
    plt.show()

    # 8. Compare final test performance
    print("\nFinal Performance Comparison:")
    print(f"GoogLeNet Test Accuracy: {g_test_acc[-1]:.2f}%")
    print(f"SimpleCNN Test Accuracy: {s_test_acc[-1]:.2f}%")


# Entry-point guard: also required for the DataLoader worker processes
# (num_workers=2 above) to safely re-import this module on spawn-based platforms.
if __name__ == '__main__':
    main()