import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from time import time
from torchvision.models import resnet18

# Fix RNG seeds so model init and shuffling are reproducible across runs.
torch.manual_seed(42)
np.random.seed(42)

# 1. Data preprocessing and loading.
transform = transforms.Compose([
    transforms.Resize((32, 32)),  # upscale MNIST's 28x28 digits to the 32x32 size the models below assume
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST dataset mean and std
])

# Datasets (downloaded into ./data on first run).
train_dataset = MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = MNIST(root='./data', train=False, download=True, transform=transform)

# Batched loaders. NOTE(review): these are created at module import time, so
# DataLoader worker processes (num_workers > 0) re-create them on re-import;
# harmless because main() is guarded, but wasteful — consider moving into main().
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=2)


# 2. Basic residual block used by the custom ResNet.
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34 style).

    Computes ReLU(BN(conv2(ReLU(BN(conv1(x))))) + shortcut(x)). The shortcut
    is the identity unless the stride or channel count changes, in which case
    a 1x1 convolution + BatchNorm projects the input to the matching shape.
    """

    # Channel multiplier of the block output relative to `out_channels`
    # (always 1 for basic blocks; bottleneck blocks would use 4).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock, self).__init__()

        # Main path: 3x3 conv (possibly strided) -> BN -> ReLU -> 3x3 conv -> BN.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Projection shortcut only when input/output shapes would mismatch;
        # otherwise an empty Sequential acts as the identity.
        needs_projection = stride != 1 or in_channels != self.expansion * out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, self.expansion * out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = self.shortcut(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += residual  # residual connection
        return self.relu(out)


# 3. Simplified ResNet adapted to MNIST's 1-channel input.
class ResNet(nn.Module):
    """Simplified ResNet for single-channel (MNIST-style) input.

    Structure: a 7x7 stride-2 stem with 3x3 stride-2 max-pool, four residual
    stages (64/128/256/512 channels, stages 2-4 downsample by 2), global
    average pooling, and a linear classifier.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        # Channel count fed into the next stage; updated by _make_layer.
        self.in_channels = 64

        # Stem (the original ResNet stem, kept simple; input has 1 channel).
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # Residual stages; every stage after the first halves spatial size.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Classifier head.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        layers = []
        for i in range(num_blocks):
            block_stride = stride if i == 0 else 1
            layers.append(block(self.in_channels, out_channels, block_stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        # For a 1x32x32 input the stem yields N x 64 x 8 x 8, and each later
        # stage halves the spatial size while doubling the channel count.
        out = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)           # N x 512 x 1 x 1
        out = torch.flatten(out, 1)       # N x 512
        return self.fc(out)               # N x num_classes


# 4. A simple CNN baseline for comparison.
class SimpleCNN(nn.Module):
    """Plain CNN baseline: three conv-BN-ReLU-pool stages plus an MLP head."""

    def __init__(self, num_classes=10):
        super(SimpleCNN, self).__init__()
        # Three feature stages; each pool halves the spatial size, so a
        # 32x32 input ends as 128 x 4 x 4.
        stages = []
        for cin, cout in ((1, 32), (32, 64), (64, 128)):
            stages += [
                nn.Conv2d(cin, cout, kernel_size=3, padding=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            ]
        self.features = nn.Sequential(*stages)

        # MLP classifier with dropout for regularization.
        self.classifier = nn.Sequential(
            nn.Linear(128 * 4 * 4, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        feats = self.features(x)              # N x 128 x 4 x 4
        flat = torch.flatten(feats, 1)        # N x 2048
        return self.classifier(flat)          # N x num_classes


# 5. Training and evaluation functions.
def train(model, device, train_loader, optimizer, criterion, epoch, l1_lambda=0.0001):
    """Run one training epoch and report the average loss and accuracy.

    Args:
        model: network to train (already moved to `device`).
        device: torch.device batches are moved to.
        train_loader: iterable of (inputs, targets) batches.
        optimizer: optimizer stepping `model`'s parameters.
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        epoch: epoch number (used only for logging).
        l1_lambda: L1 regularization weight; 0 disables the penalty.
            NOTE(review): the penalty covers *all* parameters, including
            BatchNorm weights/biases — confirm that is intended.

    Returns:
        (average_batch_loss, accuracy_percent). The reported loss includes
        the L1 term when enabled.
    """
    model.train()
    train_loss = 0.0
    correct = 0
    total = 0

    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Optional L1 penalty. Built out-of-place: `loss = loss + ...` avoids
        # the original in-place `+=`, which mutated a non-leaf autograd tensor.
        if l1_lambda > 0:
            l1_loss = sum(param.abs().sum() for param in model.parameters())
            loss = loss + l1_lambda * l1_loss

        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

    acc = 100. * correct / total
    avg_loss = train_loss / len(train_loader)
    print(f'Train Epoch: {epoch} | Loss: {avg_loss:.4f} | Acc: {acc:.2f}%')
    return avg_loss, acc


def test(model, device, test_loader, criterion):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    acc = 100. * correct / total
    avg_loss = test_loss / len(test_loader)
    print(f'Test Loss: {avg_loss:.4f} | Acc: {acc:.2f}%')
    return avg_loss, acc


# 6. Main entry point.
def _fit(model, optimizer, criterion, device, epochs, label):
    """Train and evaluate `model` for `epochs` epochs on the module-level
    train_loader/test_loader, printing progress and total wall-clock time.

    Returns:
        (train_losses, train_accs, test_losses, test_accs) per-epoch lists.
    """
    print(f"\nTraining {label}...")
    start_time = time()
    train_losses, train_accs, test_losses, test_accs = [], [], [], []
    for epoch in range(1, epochs + 1):
        tr_loss, tr_acc = train(model, device, train_loader, optimizer, criterion, epoch)
        te_loss, te_acc = test(model, device, test_loader, criterion)
        train_losses.append(tr_loss)
        train_accs.append(tr_acc)
        test_losses.append(te_loss)
        test_accs.append(te_acc)
    print(f"{label} training time: {time() - start_time:.2f} seconds")
    return train_losses, train_accs, test_losses, test_accs


def _plot_histories(histories):
    """Plot accuracy (left) and loss (right) curves for every model and save
    the combined figure to resnet_training_curves.png.

    Args:
        histories: list of (label, (train_losses, train_accs,
            test_losses, test_accs)) tuples as produced by _fit.
    """
    plt.figure(figsize=(15, 6))

    # Accuracy curves.
    plt.subplot(1, 2, 1)
    for label, (_, tr_acc, _, te_acc) in histories:
        plt.plot(tr_acc, label=f'{label} Train')
        plt.plot(te_acc, label=f'{label} Test')
    plt.title('Accuracy vs Epoch')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True)

    # Loss curves.
    plt.subplot(1, 2, 2)
    for label, (tr_loss, _, te_loss, _) in histories:
        plt.plot(tr_loss, label=f'{label} Train')
        plt.plot(te_loss, label=f'{label} Test')
    plt.title('Loss vs Epoch')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('resnet_training_curves.png')  # persist the figure
    plt.show()


def main():
    """Build three MNIST classifiers, train each for the same number of
    epochs, plot their learning curves, and print a final comparison."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Custom ResNet; [2, 2, 2, 2] is the ResNet-18 block layout.
    resnet = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)

    # Torchvision ResNet-18 adapted to MNIST: 1-channel stem, 10-class head.
    # NOTE(review): `pretrained=` is deprecated since torchvision 0.13 in
    # favor of `weights=None`; kept as-is for older-version compatibility.
    pretrained_resnet = resnet18(pretrained=False)
    pretrained_resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    pretrained_resnet.fc = nn.Linear(pretrained_resnet.fc.in_features, 10)
    pretrained_resnet = pretrained_resnet.to(device)

    simple_cnn = SimpleCNN().to(device)

    # Shared loss; weight_decay on each optimizer provides L2 regularization.
    criterion = nn.CrossEntropyLoss()
    epochs = 15
    runs = [
        ("Custom ResNet", resnet),
        ("Official ResNet", pretrained_resnet),
        ("SimpleCNN", simple_cnn),
    ]

    # Train each model with identical optimizer settings.
    histories = []
    for label, model in runs:
        optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
        histories.append((label, _fit(model, optimizer, criterion, device, epochs, label)))

    # 7. Visualize results.
    _plot_histories(histories)

    # 8. Compare final performance.
    print("\nFinal Performance Comparison:")
    for label, (_, _, _, te_acc) in histories:
        print(f"{label} Test Accuracy: {te_acc[-1]:.2f}%")


# Standard entry-point guard: required so DataLoader worker processes
# (num_workers > 0) can re-import this module without re-running training.
if __name__ == '__main__':
    main()