import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
import os

# Configure matplotlib so CJK (SimHei) labels and minus signs render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Select the compute device once at import time: GPU if available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")


# Basic 3x3 residual block (ResNet-v1 style) used by the network below.
class ResidualBlock(nn.Module):
    """Two 3x3 conv/BN stages with an additive skip connection.

    When the stride or channel count changes, the skip path becomes a
    1x1 conv + BN projection; otherwise it is the identity (empty
    ``nn.Sequential``).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        # Main path: conv-BN-ReLU-conv-BN (ReLU applied in forward()).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Projection shortcut only when shapes differ; identity otherwise.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Add the skip connection, then apply the final activation.
        return F.relu(y + residual)


# ResNet-18-style classifier adapted to single-channel MNIST input.
class ResNet(nn.Module):
    """Simplified ResNet-18: a 3x3 stem, four residual stages, and a
    global-average-pooled linear head producing ``num_classes`` logits."""

    def __init__(self, num_classes=10):
        super(ResNet, self).__init__()
        # Stem: 3x3 conv (instead of the canonical 7x7) to suit small images,
        # single input channel for grayscale MNIST.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four stages of two residual blocks each; stages 2-4 downsample.
        self.layer1 = self._make_layer(64, 64, 2, stride=1)
        self.layer2 = self._make_layer(64, 128, 2, stride=2)
        self.layer3 = self._make_layer(128, 256, 2, stride=2)
        self.layer4 = self._make_layer(256, 512, 2, stride=2)

        # Head: collapse spatial dims to 1x1, then classify.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, in_channels, out_channels, num_blocks, stride):
        """Stack ``num_blocks`` residual blocks; only the first may downsample."""
        blocks = [ResidualBlock(in_channels, out_channels, stride)]
        for _ in range(num_blocks - 1):
            blocks.append(ResidualBlock(out_channels, out_channels, 1))
        return nn.Sequential(*blocks)

    def forward(self, x):
        x = self.maxpool(F.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = torch.flatten(self.avgpool(x), 1)
        return self.fc(x)


# Data loading: local .npz first, torchvision download as fallback.
def load_local_mnist(data_path="./mnist_data"):
    """Load MNIST as ``(train_dataset, test_dataset)`` with 224x224 images.

    Tries a local ``mnist.npz`` (either ``data_path`` itself or
    ``data_path/mnist.npz``); on any failure falls back to a torchvision
    download into ``./data``.

    NOTE(review): the local branch scales pixels by /255 while the
    torchvision branch applies mean/std normalization — the two branches
    yield differently-scaled tensors; confirm this asymmetry is intended.

    Returns:
        Tuple of train and test datasets yielding (image, label) pairs,
        images shaped (1, 224, 224).
    """
    print(f"尝试从本地目录 {data_path} 加载MNIST数据...")
    mnist_npz_path = os.path.join(data_path, "mnist.npz") if os.path.isdir(data_path) else data_path

    if os.path.exists(mnist_npz_path) and mnist_npz_path.endswith('.npz'):
        try:
            with np.load(mnist_npz_path, allow_pickle=True) as f:
                x_train, y_train = f['x_train'], f['y_train']
                x_test, y_test = f['x_test'], f['y_test']
            print(f"成功从本地文件 {mnist_npz_path} 加载数据")

            def _resize_batch(images, chunk=512):
                # Bilinear-resize all images to 224x224 in float32 chunks.
                # Replaces the previous one-image-at-a-time loop and its
                # float64 staging arrays (~24 GB for the train split) with
                # batched interpolation into a single float32 buffer.
                out = torch.empty((images.shape[0], 1, 224, 224), dtype=torch.float32)
                for start in range(0, images.shape[0], chunk):
                    batch = torch.from_numpy(images[start:start + chunk]).float().unsqueeze(1)
                    out[start:start + batch.shape[0]] = F.interpolate(
                        batch, size=(224, 224), mode='bilinear', align_corners=False)
                return out

            # Scale to [0, 1]; images stay single-channel.
            x_train = _resize_batch(x_train) / 255.0
            y_train = torch.LongTensor(y_train)
            x_test = _resize_batch(x_test) / 255.0
            y_test = torch.LongTensor(y_test)
            return TensorDataset(x_train, y_train), TensorDataset(x_test, y_test)
        except Exception as e:
            print(f"本地加载失败: {e}")

    # Local load failed or no file: download via torchvision.
    print("使用torchvision方式加载MNIST数据...")
    import torchvision
    import torchvision.transforms as transforms
    transform = transforms.Compose([
        transforms.Resize((224, 224)),  # match the network's expected input size
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std
    ])
    return (
        torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform),
        torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
    )


# Preview a grid of training images with their labels.
def visualize_samples(train_loader, num_samples=9):
    """Show up to ``num_samples`` images from the first training batch in a 3x3 grid."""
    images, labels = next(iter(train_loader))
    count = min(num_samples, len(images))
    plt.figure(figsize=(8, 8))
    for idx in range(count):
        plt.subplot(3, 3, idx + 1)
        # De-normalization for display; assumes (0.5, 0.5) scaling — TODO
        # confirm against the normalization actually applied upstream.
        shown = (images[idx] / 2 + 0.5).numpy()
        plt.imshow(np.transpose(shown, (1, 2, 0)).squeeze(), cmap='gray')
        plt.title(f'标签: {labels[idx].item()}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()


# Training loop with per-epoch evaluation on the held-out test set.
def train_model(model, train_loader, test_loader, epochs=5):
    """Train ``model`` with Adam (lr=0.001) and cross-entropy loss.

    Returns three lists of per-epoch values:
    (train_losses, train_accuracies, test_accuracies).
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    train_losses = []
    train_accuracies = []
    test_accuracies = []

    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        hits = 0
        seen = 0

        for step, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)

            # Standard SGD step: forward, loss, backward, update.
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate running statistics for this epoch.
            epoch_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            seen += labels.size(0)
            hits += (predicted == labels).sum().item()

            if (step + 1) % 100 == 0:
                print(f'Epoch [{epoch + 1}/{epochs}], Step [{step + 1}/{len(train_loader)}], Loss: {loss.item():.4f}')

        train_acc = 100 * hits / seen
        avg_loss = epoch_loss / len(train_loader)
        test_acc = evaluate_model(model, test_loader)

        train_losses.append(avg_loss)
        train_accuracies.append(train_acc)
        test_accuracies.append(test_acc)
        print(
            f'Epoch [{epoch + 1}/{epochs}], 平均损失: {avg_loss:.4f}, 训练准确率: {train_acc:.2f}%, 测试准确率: {test_acc:.2f}%')

    return train_losses, train_accuracies, test_accuracies


# Accuracy evaluation over a full loader (gradient-free).
def evaluate_model(model, test_loader):
    """Return the classification accuracy (percent) of ``model`` on ``test_loader``."""
    model.eval()
    hits = 0
    seen = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            # argmax over class logits gives the predicted label.
            predicted = torch.max(model(images).data, 1)[1]
            seen += labels.size(0)
            hits += (predicted == labels).sum().item()
    return 100 * hits / seen


# Plot loss and accuracy curves side by side.
def plot_training_history(train_losses, train_accuracies, test_accuracies):
    """Draw the per-epoch training loss and train/test accuracy as two subplots."""
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 4))

    loss_ax.plot(train_losses, label='训练损失')
    loss_ax.set_title('模型损失')
    loss_ax.set_xlabel('轮次')
    loss_ax.set_ylabel('损失')
    loss_ax.legend()
    loss_ax.grid(True)

    acc_ax.plot(train_accuracies, label='训练准确率')
    acc_ax.plot(test_accuracies, label='测试准确率')
    acc_ax.set_title('模型准确率')
    acc_ax.set_xlabel('轮次')
    acc_ax.set_ylabel('准确率 (%)')
    acc_ax.legend()
    acc_ax.grid(True)

    plt.tight_layout()
    plt.show()


# Qualitative check: predict on the first test batch and visualize results.
def test_predictions(model, test_loader, num_samples=10):
    """Print and plot predictions vs. ground truth for the first test batch.

    Args:
        model: trained classifier producing per-class logits.
        test_loader: DataLoader yielding (images, labels) batches.
        num_samples: number of samples to report (clamped to the batch size).

    Side effects: prints a table, shows a figure, and saves it as
    'resnet_mnist_predictions.png'.
    """
    model.eval()
    images, labels = next(iter(test_loader))
    images, labels = images.to(device), labels.to(device)

    # Clamp so a batch smaller than num_samples cannot raise IndexError.
    num_samples = min(num_samples, len(images))

    # Inference only: skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        outputs = model(images[:num_samples])
    _, predicted = torch.max(outputs, 1)

    print("预测结果示例:")
    print("样本\t真实标签\t预测标签")
    print("-" * 30)
    for i in range(num_samples):
        print(f"{i + 1}\t{labels[i].item()}\t\t{predicted[i].item()}")

    plt.figure(figsize=(12, 6))
    for i in range(min(10, num_samples)):
        plt.subplot(2, 5, i + 1)
        # De-normalization for display; assumes (0.5, 0.5) scaling — TODO
        # confirm against the normalization actually applied upstream.
        img = images[i].cpu() / 2 + 0.5
        npimg = img.numpy()
        plt.imshow(np.transpose(npimg, (1, 2, 0)).squeeze(), cmap='gray')
        # Green title for a correct prediction, red for a mistake.
        color = 'green' if predicted[i] == labels[i] else 'red'
        plt.title(f'真实: {labels[i].item()}, 预测: {predicted[i].item()}', color=color)
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('resnet_mnist_predictions.png', dpi=300, bbox_inches='tight')
    print("\n预测结果已保存为 'resnet_mnist_predictions.png'")
    plt.show()


# Script driver: wires together data, model, training, and reporting.
def main():
    """End-to-end pipeline: load data, train the ResNet, evaluate, and save."""
    print("MNIST手写数字识别 - PyTorch ResNet实现")
    print("=" * 50)

    # Data loading and preprocessing.
    print("\n加载MNIST数据...")
    train_set, test_set = load_local_mnist("../mnist_data")

    # Small batch size (16) to accommodate modest hardware.
    batch_size = 16
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    # Show a few training samples before training.
    print("\n可视化样本数据...")
    visualize_samples(train_loader)

    # Build the model on whichever device is available.
    print("\n创建ResNet模型...")
    model = ResNet().to(device)
    print(model)

    # Train and collect per-epoch history.
    print("\n开始训练模型...")
    history = train_model(model, train_loader, test_loader, epochs=5)
    train_losses, train_accuracies, test_accuracies = history

    # Training curves.
    print("\n绘制训练历史...")
    plot_training_history(train_losses, train_accuracies, test_accuracies)

    # Qualitative predictions on test samples.
    print("\n测试预测结果...")
    test_predictions(model, test_loader)

    # Persist trained weights.
    torch.save(model.state_dict(), 'resnet_mnist_model.pth')
    print("\n模型已保存为 'resnet_mnist_model.pth'")

    # Final held-out accuracy.
    final_accuracy = evaluate_model(model, test_loader)
    print(f"\n最终测试准确率: {final_accuracy:.2f}%")


# Standard script entry point guard.
if __name__ == "__main__":
    main()