# 第三题：基于PyTorch 2.0的CNN神经网络模型实现
"""
第三题，主要考核利用pytorch2.0实现基于CNN的神经网络模型，需要掌握CNN相关的概念及pytorch的相关
知识点。需要按照提供的模型图或描述、参数等编写神经网络的类。"""


import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# Fix the random seeds so results are reproducible across runs
torch.manual_seed(42)
np.random.seed(42)

# Report which PyTorch version is in use
print(f"PyTorch版本: {torch.__version__}")
# Conv output size = (input size - kernel size + 2 * padding) / stride + 1
# 1. 基础CNN模型实现
class BasicCNN(nn.Module):
    """Basic CNN: three conv blocks (Conv -> BatchNorm -> ReLU -> MaxPool)
    followed by a dropout-regularized two-layer classifier.

    Designed for single-channel 28x28 input (e.g. MNIST): the spatial size
    shrinks 28 -> 14 -> 7 -> 3 across the three 2x2 max-pools, which fixes
    the flattened feature size at 128 * 3 * 3.
    """

    def __init__(self, num_classes=10):
        """
        :param num_classes: number of output classes
        """
        super(BasicCNN, self).__init__()

        # First conv block: 1 -> 32 channels, same-padding 3x3 conv.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Second conv block: 32 -> 64 channels.
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Third conv block: 64 -> 128 channels.
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Classifier head; 128 * 3 * 3 assumes the 28x28 input noted above.
        self.fc1 = nn.Linear(128 * 3 * 3, 512)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        """Compute class logits.

        :param x: input batch of shape (N, 1, 28, 28)
        :return: logits of shape (N, num_classes)
        """
        # Three conv blocks: conv -> batchnorm -> ReLU -> 2x2 max-pool.
        x = self.pool1(self.relu1(self.bn1(self.conv1(x))))
        x = self.pool2(self.relu2(self.bn2(self.conv2(x))))
        x = self.pool3(self.relu3(self.bn3(self.conv3(x))))

        # Flatten every non-batch dimension for the fully connected head;
        # -1 lets view() infer the feature count from the tensor size.
        x = x.view(x.size(0), -1)

        # Fully connected head. Fix: the original reused self.relu1 (the
        # first conv block's activation module) here; use the stateless
        # functional ReLU so each named module belongs to exactly one
        # place in the network.
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)

        return x

# 2. 带有残差连接的CNN模型
class ResidualBlock(nn.Module):
    """Standard ResNet basic block.

    Two 3x3 convolutions with batch norm; the input is added back through
    an identity (or 1x1 projection) shortcut before the final ReLU.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        """
        :param in_channels: number of input feature channels
        :param out_channels: number of output feature channels
        :param stride: stride of the first convolution (2 halves the spatial size)
        """
        super(ResidualBlock, self).__init__()

        # Main path: conv3x3 -> BN -> ReLU -> conv3x3 -> BN.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Shortcut path: identity unless the shape changes, in which case
        # a 1x1 projection matches the channel count and stride.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        identity = self.shortcut(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return self.relu(y + identity)

class ResNetCNN(nn.Module):
    """基于残差网络的CNN模型"""
    
    def __init__(self, block, num_blocks, num_classes=10):
        """
        初始化ResNet模型
        :param block: 残差块类型
        :param num_blocks: 每层的残差块数量
        :param num_classes: 分类数量
        """
        super(ResNetCNN, self).__init__()
        self.in_channels = 64
        
        # 初始卷积层
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        
        # 残差层
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        
        # 全局平均池化和全连接层
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)
    
    def _make_layer(self, block, out_channels, num_blocks, stride):
        """
        创建残差层
        :param block: 残差块类型
        :param out_channels: 输出通道数
        :param num_blocks: 残差块数量
        :param stride: 步长
        :return: 残差层
        """
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    
    def forward(self, x):
        """
        前向传播
        :param x: 输入张量
        :return: 输出张量
        """
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

def ResNet18(num_classes=10):
    """Build a ResNet-18-style network: four stages of two basic blocks each.

    :param num_classes: number of output classes
    :return: a configured ResNetCNN instance
    """
    return ResNetCNN(block=ResidualBlock, num_blocks=[2, 2, 2, 2], num_classes=num_classes)

# 3. 模型训练和评估函数
def train_model(model, train_loader, criterion, optimizer, device, num_epochs=10):
    """
    训练模型
    :param model: 模型
    :param train_loader: 训练数据加载器
    :param criterion: 损失函数
    :param optimizer: 优化器
    :param device: 设备
    :param num_epochs: 训练轮数
    :return: 训练损失和准确率历史
    """
    model.train()
    train_loss_history = []
    train_acc_history = []
    
    for epoch in range(num_epochs):
        running_loss = 0.0
        correct = 0
        total = 0
        
        for i, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            
            # 前向传播
            outputs = model(images)
            loss = criterion(outputs, labels)
            
            # 反向传播和优化
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # 统计信息
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100 * correct / total
        train_loss_history.append(epoch_loss)
        train_acc_history.append(epoch_acc)
        
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}, Accuracy: {epoch_acc:.2f}%')
    
    return train_loss_history, train_acc_history

def evaluate_model(model, test_loader, device):
    """Measure top-1 accuracy on a held-out set.

    :param model: trained network
    :param test_loader: DataLoader yielding (images, labels) batches
    :param device: torch.device that batches are moved to
    :return: accuracy as a percentage
    """
    model.eval()
    hits = 0
    seen = 0

    # Inference only: no gradient tracking needed.
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            predictions = model(images).data.max(1)[1]
            seen += labels.size(0)
            hits += (predictions == labels).sum().item()

    accuracy = 100 * hits / seen
    print(f'Test Accuracy: {accuracy:.2f}%')
    return accuracy

def plot_training_history(train_loss_history, train_acc_history):
    """Plot per-epoch loss and accuracy curves side by side.

    :param train_loss_history: loss value per epoch
    :param train_acc_history: accuracy (percent) per epoch
    """
    # (subplot position, data series, title, y-axis label) per panel.
    panels = [
        (1, train_loss_history, 'Training Loss', 'Loss'),
        (2, train_acc_history, 'Training Accuracy', 'Accuracy (%)'),
    ]

    plt.figure(figsize=(12, 5))
    for position, series, title, ylabel in panels:
        plt.subplot(1, 2, position)
        plt.plot(series)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)

    plt.tight_layout()
    plt.show()

def visualize_predictions(model, test_loader, device, class_names=None, num_images=10):
    """Show a grid of test images with predicted vs. true labels.

    Correct predictions are titled in green, mistakes in red.

    :param model: trained network
    :param test_loader: DataLoader yielding (images, labels) batches
    :param device: torch.device that the batch is moved to
    :param class_names: optional index -> name mapping for labels
    :param num_images: maximum number of images to display
    """
    model.eval()

    # Grab one batch of test data.
    images, labels = next(iter(test_loader))
    images, labels = images.to(device), labels.to(device)

    # Predicted class index per image.
    predicted = model(images).max(1)[1]

    # Render up to num_images samples in a 2x5 grid.
    plt.figure(figsize=(12, 8))
    for i in range(min(num_images, len(images))):
        plt.subplot(2, 5, i+1)
        plt.imshow(images[i].cpu().squeeze(), cmap='gray')
        plt.axis('off')

        is_correct = predicted[i] == labels[i]
        if class_names:
            title = f'Pred: {class_names[predicted[i]]}\nTrue: {class_names[labels[i]]}'
        else:
            title = f'Pred: {predicted[i]}\nTrue: {labels[i]}'

        plt.title(title, color='green' if is_correct else 'red')

    plt.tight_layout()
    plt.show()

# 4. 主函数和测试代码
if __name__ == "__main__":
    # Use the GPU when available, otherwise fall back to the CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # Load the MNIST dataset; ToTensor scales pixels to [0, 1] and
    # Normalize((0.5,), (0.5,)) maps them to roughly [-1, 1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])
    
    train_dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
    test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
    
    # Build the data loaders (only the training set is shuffled).
    batch_size = 64
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    
    # Class names: the digits 0-9.
    class_names = [str(i) for i in range(10)]
    
    # Build the basic CNN model.
    print("训练基础CNN模型...")
    basic_cnn = BasicCNN(num_classes=10).to(device)
    
    # Loss function and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(basic_cnn.parameters(), lr=0.001)
    
    # Train the model.
    num_epochs = 5
    basic_loss_history, basic_acc_history = train_model(
        basic_cnn, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate on the test set.
    basic_accuracy = evaluate_model(basic_cnn, test_loader, device)
    
    # Plot the training curves.
    plot_training_history(basic_loss_history, basic_acc_history)
    
    # Visualize sample predictions.
    visualize_predictions(basic_cnn, test_loader, device, class_names)
    
    # Build the ResNet model.
    print("\n训练ResNet模型...")
    resnet = ResNet18(num_classes=10).to(device)
    
    # Loss function and optimizer (fresh instances for the second model).
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(resnet.parameters(), lr=0.001)
    
    # Train the model.
    resnet_loss_history, resnet_acc_history = train_model(
        resnet, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate on the test set.
    resnet_accuracy = evaluate_model(resnet, test_loader, device)
    
    # Plot the training curves.
    plot_training_history(resnet_loss_history, resnet_acc_history)
    
    # Visualize sample predictions.
    visualize_predictions(resnet, test_loader, device, class_names)
    
    # Compare the two models' test accuracy.
    print(f"\n模型性能比较:")
    print(f"基础CNN测试准确率: {basic_accuracy:.2f}%")
    print(f"ResNet测试准确率: {resnet_accuracy:.2f}%")
    
    # Persist the trained weights.
    torch.save(basic_cnn.state_dict(), 'basic_cnn.pth')
    torch.save(resnet.state_dict(), 'resnet.pth')
    print("模型已保存")