{
  "cells":[
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "# 模型性能对比分析\n",
        "\n",
        "本笔记本将展示如何替换模型并比较不同模型在相同任务上的性能表现。我们将使用Fashion-MNIST数据集进行图像分类任务。"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":1,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Standard-library imports\n",
        "import time\n",
        "\n",
        "# Third-party imports\n",
        "import matplotlib.pyplot as plt\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import seaborn as sns\n",
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.optim as optim\n",
        "import torchvision\n",
        "import torchvision.transforms as transforms\n",
        "from sklearn.metrics import accuracy_score, classification_report\n",
        "from torch.utils.data import DataLoader\n",
        "\n",
        "# Seed both torch and numpy so results are reproducible\n",
        "torch.manual_seed(42)\n",
        "np.random.seed(42)\n",
        "\n",
        "# Plotting style\n",
        "plt.style.use('seaborn-v0_8')\n",
        "sns.set_palette(\"husl\")\n",
        "\n",
        "# Prefer a GPU when one is available\n",
        "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "print(f\"使用设备: {device}\")"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 数据准备"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":2,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Convert images to tensors and normalize pixel values to [-1, 1]\n",
        "transform = transforms.Compose([\n",
        "    transforms.ToTensor(),\n",
        "    transforms.Normalize((0.5,), (0.5,)),\n",
        "])\n",
        "\n",
        "# Download Fashion-MNIST and wrap the train/test splits\n",
        "train_set = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)\n",
        "test_set = torchvision.datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)\n",
        "\n",
        "# Mini-batch loaders; only the training split is shuffled\n",
        "batch_size = 64\n",
        "train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n",
        "test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n",
        "\n",
        "# Human-readable label names, indexed by class id\n",
        "classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n",
        "           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 模型定义"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":3,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Model definitions: three architectures of increasing depth/complexity.\n",
        "# NOTE: the original cell had its indentation flattened to a single space,\n",
        "# which raises IndentationError; proper block indentation is restored here.\n",
        "\n",
        "class BaseCNN(nn.Module):\n",
        "    \"\"\"Baseline CNN: two conv+pool stages followed by two fully connected layers.\"\"\"\n",
        "\n",
        "    def __init__(self, num_classes=10):\n",
        "        super(BaseCNN, self).__init__()\n",
        "        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)\n",
        "        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)\n",
        "        self.pool = nn.MaxPool2d(2, 2)\n",
        "        # Two 2x2 poolings reduce 28x28 inputs to 7x7 feature maps\n",
        "        self.fc1 = nn.Linear(64 * 7 * 7, 128)\n",
        "        self.fc2 = nn.Linear(128, num_classes)\n",
        "        self.dropout = nn.Dropout(0.5)\n",
        "        self.relu = nn.ReLU()\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = self.pool(self.relu(self.conv1(x)))\n",
        "        x = self.pool(self.relu(self.conv2(x)))\n",
        "        x = x.view(-1, 64 * 7 * 7)\n",
        "        x = self.relu(self.fc1(x))\n",
        "        x = self.dropout(x)\n",
        "        x = self.fc2(x)\n",
        "        return x\n",
        "\n",
        "\n",
        "class ResidualBlock(nn.Module):\n",
        "    \"\"\"Two-conv residual block with an optional 1x1 projection shortcut.\"\"\"\n",
        "\n",
        "    def __init__(self, in_channels, out_channels, stride=1):\n",
        "        super(ResidualBlock, self).__init__()\n",
        "        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n",
        "        self.bn1 = nn.BatchNorm2d(out_channels)\n",
        "        self.relu = nn.ReLU(inplace=True)\n",
        "        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)\n",
        "        self.bn2 = nn.BatchNorm2d(out_channels)\n",
        "\n",
        "        # Project the identity path when the spatial size or channel count changes\n",
        "        self.shortcut = nn.Sequential()\n",
        "        if stride != 1 or in_channels != out_channels:\n",
        "            self.shortcut = nn.Sequential(\n",
        "                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),\n",
        "                nn.BatchNorm2d(out_channels)\n",
        "            )\n",
        "\n",
        "    def forward(self, x):\n",
        "        residual = x\n",
        "        out = self.conv1(x)\n",
        "        out = self.bn1(out)\n",
        "        out = self.relu(out)\n",
        "        out = self.conv2(out)\n",
        "        out = self.bn2(out)\n",
        "        out += self.shortcut(residual)\n",
        "        out = self.relu(out)\n",
        "        return out\n",
        "\n",
        "\n",
        "class ResNetStyle(nn.Module):\n",
        "    \"\"\"Small ResNet-like model: three stages of residual blocks + global average pooling.\"\"\"\n",
        "\n",
        "    def __init__(self, num_classes=10):\n",
        "        super(ResNetStyle, self).__init__()\n",
        "        self.in_channels = 32\n",
        "        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=False)\n",
        "        self.bn1 = nn.BatchNorm2d(32)\n",
        "        self.relu = nn.ReLU(inplace=True)\n",
        "        self.layer1 = self.make_layer(32, 2, stride=1)\n",
        "        self.layer2 = self.make_layer(64, 2, stride=2)\n",
        "        self.layer3 = self.make_layer(128, 2, stride=2)\n",
        "        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n",
        "        self.fc = nn.Linear(128, num_classes)\n",
        "\n",
        "    def make_layer(self, out_channels, blocks, stride):\n",
        "        \"\"\"Stack `blocks` residual blocks; only the first one may downsample.\"\"\"\n",
        "        strides = [stride] + [1] * (blocks - 1)\n",
        "        layers = []\n",
        "        for block_stride in strides:  # renamed to avoid shadowing the `stride` argument\n",
        "            layers.append(ResidualBlock(self.in_channels, out_channels, block_stride))\n",
        "            self.in_channels = out_channels\n",
        "        return nn.Sequential(*layers)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = self.conv1(x)\n",
        "        x = self.bn1(x)\n",
        "        x = self.relu(x)\n",
        "        x = self.layer1(x)\n",
        "        x = self.layer2(x)\n",
        "        x = self.layer3(x)\n",
        "        x = self.avg_pool(x)\n",
        "        x = x.view(x.size(0), -1)\n",
        "        x = self.fc(x)\n",
        "        return x\n",
        "\n",
        "\n",
        "class DeepCNN(nn.Module):\n",
        "    \"\"\"Deeper VGG-style CNN: three conv blocks with batch norm and dropout.\"\"\"\n",
        "\n",
        "    def __init__(self, num_classes=10):\n",
        "        super(DeepCNN, self).__init__()\n",
        "        self.features = nn.Sequential(\n",
        "            nn.Conv2d(1, 32, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(32),\n",
        "            nn.Conv2d(32, 32, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(32),\n",
        "            nn.MaxPool2d(2),\n",
        "            nn.Dropout2d(0.25),\n",
        "\n",
        "            nn.Conv2d(32, 64, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(64),\n",
        "            nn.Conv2d(64, 64, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(64),\n",
        "            nn.MaxPool2d(2),\n",
        "            nn.Dropout2d(0.25),\n",
        "\n",
        "            nn.Conv2d(64, 128, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(128),\n",
        "            nn.Conv2d(128, 128, 3, padding=1),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm2d(128),\n",
        "            nn.MaxPool2d(2),\n",
        "            nn.Dropout2d(0.25),\n",
        "        )\n",
        "\n",
        "        # Three poolings reduce 28x28 inputs to 3x3 maps with 128 channels\n",
        "        self.classifier = nn.Sequential(\n",
        "            nn.Linear(128 * 3 * 3, 512),\n",
        "            nn.ReLU(),\n",
        "            nn.BatchNorm1d(512),\n",
        "            nn.Dropout(0.5),\n",
        "            nn.Linear(512, num_classes)\n",
        "        )\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = self.features(x)\n",
        "        x = x.view(x.size(0), -1)\n",
        "        x = self.classifier(x)\n",
        "        return x"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 训练和评估函数"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":4,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# NOTE: the original cell had its indentation flattened (loop bodies at the\n",
        "# same level as the `for` statements), which raises IndentationError; the\n",
        "# proper block structure is restored here.\n",
        "\n",
        "def train_model(model, train_loader, criterion, optimizer, num_epochs=10):\n",
        "    \"\"\"Train `model` for `num_epochs` epochs on `train_loader`.\n",
        "\n",
        "    Returns a tuple (train_losses, training_time) where `train_losses` holds\n",
        "    the per-epoch average loss and `training_time` is wall-clock seconds.\n",
        "    Uses the notebook-global `device`.\n",
        "    \"\"\"\n",
        "    model.train()\n",
        "    train_losses = []\n",
        "    start_time = time.time()\n",
        "\n",
        "    for epoch in range(num_epochs):\n",
        "        running_loss = 0.0\n",
        "        for i, (inputs, labels) in enumerate(train_loader):\n",
        "            inputs, labels = inputs.to(device), labels.to(device)\n",
        "\n",
        "            optimizer.zero_grad()\n",
        "            outputs = model(inputs)\n",
        "            loss = criterion(outputs, labels)\n",
        "            loss.backward()\n",
        "            optimizer.step()\n",
        "\n",
        "            running_loss += loss.item()\n",
        "\n",
        "            # Periodic progress report\n",
        "            if (i+1) % 200 == 0:\n",
        "                print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(train_loader)}], Loss: {loss.item():.4f}')\n",
        "\n",
        "        epoch_loss = running_loss / len(train_loader)\n",
        "        train_losses.append(epoch_loss)\n",
        "        print(f'Epoch [{epoch+1}/{num_epochs}], Average Loss: {epoch_loss:.4f}')\n",
        "\n",
        "    training_time = time.time() - start_time\n",
        "    print(f'Training completed in {training_time:.2f} seconds')\n",
        "    return train_losses, training_time\n",
        "\n",
        "\n",
        "def evaluate_model(model, test_loader):\n",
        "    \"\"\"Evaluate `model` on `test_loader`.\n",
        "\n",
        "    Returns (accuracy, report) where `report` is sklearn's classification\n",
        "    report as a dict using the notebook-global `classes` names.\n",
        "    \"\"\"\n",
        "    model.eval()\n",
        "    all_preds = []\n",
        "    all_labels = []\n",
        "\n",
        "    # Inference only: no gradients needed\n",
        "    with torch.no_grad():\n",
        "        for inputs, labels in test_loader:\n",
        "            inputs, labels = inputs.to(device), labels.to(device)\n",
        "            outputs = model(inputs)\n",
        "            _, preds = torch.max(outputs, 1)\n",
        "\n",
        "            all_preds.extend(preds.cpu().numpy())\n",
        "            all_labels.extend(labels.cpu().numpy())\n",
        "\n",
        "    accuracy = accuracy_score(all_labels, all_preds)\n",
        "    report = classification_report(all_labels, all_preds, target_names=classes, output_dict=True)\n",
        "    return accuracy, report"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 模型比较"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":5,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# The three candidate architectures, all moved to the active device\n",
        "models = {\n",
        "    \"BaseCNN\": BaseCNN().to(device),\n",
        "    \"ResNetStyle\": ResNetStyle().to(device),\n",
        "    \"DeepCNN\": DeepCNN().to(device),\n",
        "}\n",
        "\n",
        "# Shared training configuration so the comparison is fair\n",
        "num_epochs = 10\n",
        "criterion = nn.CrossEntropyLoss()\n",
        "\n",
        "# Per-model metrics collected during the experiment\n",
        "results = {}"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":6,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Train and evaluate every candidate model under identical settings\n",
        "for name, model in models.items():\n",
        "    print(f\"\\n=== 训练 {name} 模型 ===\")\n",
        "\n",
        "    # A fresh optimizer per model with the same hyperparameters\n",
        "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
        "\n",
        "    # Fit, then score on the held-out test set\n",
        "    train_losses, training_time = train_model(model, train_loader, criterion, optimizer, num_epochs)\n",
        "    accuracy, report = evaluate_model(model, test_loader)\n",
        "\n",
        "    # Keep everything the comparison plots and tables will need\n",
        "    results[name] = {\n",
        "        \"train_losses\": train_losses,\n",
        "        \"training_time\": training_time,\n",
        "        \"accuracy\": accuracy,\n",
        "        \"report\": report,\n",
        "    }\n",
        "\n",
        "    print(f\"{name} 准确率: {accuracy:.4f}\")\n",
        "    print(f\"{name} 训练时间: {training_time:.2f} 秒\")"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 结果可视化"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":7,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Training-loss curves for all models\n",
        "plt.figure(figsize=(12, 5))\n",
        "\n",
        "plt.subplot(1, 2, 1)\n",
        "for name, result in results.items():\n",
        "    plt.plot(result['train_losses'], label=name)\n",
        "plt.title('训练损失曲线')\n",
        "plt.xlabel('Epoch')\n",
        "plt.ylabel('Loss')\n",
        "plt.legend()\n",
        "\n",
        "# Bar chart of the test-set accuracies\n",
        "plt.subplot(1, 2, 2)\n",
        "names = list(results.keys())\n",
        "accuracies = [results[name]['accuracy'] for name in names]\n",
        "plt.bar(names, accuracies)\n",
        "plt.title('测试集准确率比较')\n",
        "plt.ylabel('Accuracy')\n",
        "# Derive the y-range from the data; the previous hard-coded ylim(0.8, 0.9)\n",
        "# would clip any bar whose accuracy falls outside that window\n",
        "plt.ylim(max(0.0, min(accuracies) - 0.02), min(1.0, max(accuracies) + 0.02))\n",
        "\n",
        "plt.tight_layout()\n",
        "plt.show()"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":8,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Summary table with one row per model\n",
        "summary_columns = {\n",
        "    'Model': names,\n",
        "    'Accuracy': accuracies,\n",
        "    'Training Time (s)': [results[n]['training_time'] for n in names],\n",
        "    'Final Training Loss': [results[n]['train_losses'][-1] for n in names],\n",
        "}\n",
        "result_table = pd.DataFrame(summary_columns)\n",
        "\n",
        "print(\"模型性能比较:\")\n",
        "print(result_table)\n",
        "\n",
        "# Per-class precision/recall/F1 for every model\n",
        "for name in names:\n",
        "    print(f\"\\n{name} 分类报告:\")\n",
        "    print(pd.DataFrame(results[name]['report']).transpose().round(4))"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 模型参数数量比较"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":9,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Parameter counts per model — computed once and reused for the plot\n",
        "# (the original cell recomputed the totals a second time in a comprehension)\n",
        "param_counts = []\n",
        "for name, model in models.items():\n",
        "    total_params = sum(p.numel() for p in model.parameters())\n",
        "    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
        "    param_counts.append(total_params)\n",
        "    print(f\"{name} 总参数: {total_params:,}, 可训练参数: {trainable_params:,}\")\n",
        "\n",
        "# Accuracy as a function of model size\n",
        "plt.figure(figsize=(10, 6))\n",
        "plt.scatter(param_counts, accuracies, s=100)\n",
        "for i, name in enumerate(names):\n",
        "    plt.annotate(name, (param_counts[i], accuracies[i]), xytext=(5, 5), textcoords='offset points')\n",
        "\n",
        "plt.xlabel('参数数量')\n",
        "plt.ylabel('准确率')\n",
        "plt.title('模型复杂度与性能关系')\n",
        "plt.grid(True)\n",
        "plt.show()"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 保存结果和模型"
      ]
    },
    {
      "cell_type":"code",
      "execution_count":10,
      "metadata":{
      },
      "outputs":[
      ],
      "source":[
        "# Persist the comparison table for later reference\n",
        "result_table.to_csv('model_comparison_results.csv', index=False)\n",
        "print(\"结果已保存到 model_comparison_results.csv\")\n",
        "\n",
        "# Persist the weights of the most accurate model\n",
        "best_model_name = max(results, key=lambda m: results[m]['accuracy'])\n",
        "best_model = models[best_model_name]\n",
        "torch.save(best_model.state_dict(), f'best_model_{best_model_name}.pth')\n",
        "print(f\"最佳模型 {best_model_name} 已保存\")"
      ]
    },
    {
      "cell_type":"markdown",
      "metadata":{
      },
      "source":[
        "## 结论\n",
        "\n",
        "根据上述分析，我们可以得出以下结论：\n",
        "\n",
        "1. **BaseCNN**：简单的CNN架构，训练速度快，但准确率相对较低\n",
        "2. **ResNetStyle**：使用残差连接的模型，具有更好的梯度流动，准确率较高\n",
        "3. **DeepCNN**：更深的CNN架构，参数更多，训练时间较长，但可能获得更好的性能\n",
        "\n",
        "通过比较训练时间、准确率和参数数量，我们可以选择最适合特定应用场景的模型。通常需要在模型复杂度、训练时间和性能之间找到平衡。"
      ]
    }
  ],
  "metadata":{
    "kernelspec":{
      "display_name":"Python 3",
      "language":"python",
      "name":"python3"
    },
    "language_info":{
      "codemirror_mode":{
        "name":"ipython",
        "version":3
      },
      "file_extension":".py",
      "mimetype":"text/x-python",
      "name":"python",
      "nbconvert_exporter":"python",
      "pygments_lexer":"ipython3",
      "version":"3.8.5"
    }
  },
  "nbformat":4,
  "nbformat_minor":4
}