{
 "cells": [
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import json\n",
    "from collections import defaultdict\n",
    "from torchvision import datasets, transforms"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "12cfa4112ffb3a7d"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def get_data_loaders(batch_size=128):\n",
    "    \"\"\"Build MNIST train/test DataLoaders.\n",
    "\n",
    "    Args:\n",
    "        batch_size: samples per batch for both loaders.\n",
    "\n",
    "    Returns:\n",
    "        (train_loader, test_loader) tuple over the 'data' directory,\n",
    "        downloading the dataset on first use.\n",
    "    \"\"\"\n",
    "    # Normalize with the standard MNIST mean/std (0.1307, 0.3081).\n",
    "    transform = transforms.Compose([\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize((0.1307,), (0.3081,))\n",
    "    ])\n",
    "\n",
    "    # Shuffle only the training split; evaluation order stays fixed.\n",
    "    train_loader = torch.utils.data.DataLoader(\n",
    "        datasets.MNIST('data', train=True, download=True, transform=transform),\n",
    "        batch_size=batch_size, shuffle=True\n",
    "    )\n",
    "\n",
    "    test_loader = torch.utils.data.DataLoader(\n",
    "        datasets.MNIST('data', train=False, transform=transform),\n",
    "        batch_size=batch_size, shuffle=False\n",
    "    )\n",
    "\n",
    "    return train_loader, test_loader\n",
    "\n",
    "\n",
    "train_loader, test_loader = get_data_loaders()\n",
    "print(f\"训练集样本数: {len(train_loader.dataset)}\")\n",
    "print(f\"测试集样本数: {len(test_loader.dataset)}\")\n",
    "print(f\"图像尺寸: {train_loader.dataset[0][0].shape}\")"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c93731bab288db29"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def visualize_samples(loader, num_samples=10):\n",
    "    \"\"\"Display the first `num_samples` images of one batch with their labels.\"\"\"\n",
    "    # Pull a single batch; with shuffle=True this is effectively a random sample.\n",
    "    dataiter = iter(loader)\n",
    "    images, labels = next(dataiter)\n",
    "\n",
    "    plt.figure(figsize=(15, 3))\n",
    "    for i in range(num_samples):\n",
    "        plt.subplot(1, num_samples, i + 1)\n",
    "        plt.imshow(images[i].squeeze(), cmap='gray')  # drop channel dim for grayscale display\n",
    "        plt.title(f'Label: {labels[i].item()}')\n",
    "        plt.axis('off')\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "# NOTE(review): loaders were already built in the previous cell; rebuilding here is redundant.\n",
    "train_loader, test_loader = get_data_loaders()\n",
    "visualize_samples(train_loader)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9413dde5c73934ed"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "class BasicMLP(nn.Module):\n",
    "    \"\"\"Basic multi-layer perceptron for flattened image input.\n",
    "\n",
    "    Args:\n",
    "        input_size: flattened input dimension (28*28 = 784 for MNIST).\n",
    "        hidden_sizes: iterable of hidden-layer widths, applied in order.\n",
    "        output_size: number of output classes.\n",
    "        activation: one of 'relu', 'sigmoid', 'tanh'.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if `activation` is not a supported name.\n",
    "    \"\"\"\n",
    "\n",
    "    # Supported activation names mapped to their module classes.\n",
    "    _ACTIVATIONS = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}\n",
    "\n",
    "    # NOTE: default is a tuple, not a list, to avoid the shared mutable-default pitfall.\n",
    "    def __init__(self, input_size=784, hidden_sizes=(128,), output_size=10, activation='relu'):\n",
    "        super(BasicMLP, self).__init__()\n",
    "\n",
    "        if activation not in self._ACTIVATIONS:\n",
    "            # Previously an unknown name silently produced a linear-only stack.\n",
    "            raise ValueError(f\"unknown activation: {activation!r}\")\n",
    "\n",
    "        layers = []\n",
    "        prev_size = input_size\n",
    "\n",
    "        for hidden_size in hidden_sizes:\n",
    "            layers.append(nn.Linear(prev_size, hidden_size))\n",
    "            layers.append(self._ACTIVATIONS[activation]())\n",
    "            layers.append(nn.Dropout(0.2))  # dropout to reduce overfitting\n",
    "            prev_size = hidden_size\n",
    "\n",
    "        # Final layer emits raw logits; CrossEntropyLoss applies log-softmax itself.\n",
    "        layers.append(nn.Linear(prev_size, output_size))\n",
    "        self.network = nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Flatten (N, ...) input to (N, input_size) and return (N, output_size) logits.\"\"\"\n",
    "        x = x.view(x.size(0), -1)\n",
    "        return self.network(x)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "d1dac7ca796663a4"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "class ModelTrainer:\n",
    "    \"\"\"Bundles a model, optimizer, and loss with train/eval/plot helpers.\n",
    "\n",
    "    Moves the model to GPU when available and accumulates per-epoch\n",
    "    metric histories (filled in by train()) for later plotting.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model, optimizer, criterion):\n",
    "        self.model = model\n",
    "        self.optimizer = optimizer\n",
    "        self.criterion = criterion\n",
    "        # Prefer CUDA when present; all batches are moved to this device.\n",
    "        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "        self.model.to(self.device)\n",
    "        # Per-epoch metric histories, appended by train().\n",
    "        self.train_losses = []\n",
    "        self.train_accuracies = []\n",
    "        self.test_losses = []\n",
    "        self.test_accuracies = []\n",
    "\n",
    "    def train_epoch(self, train_loader):\n",
    "        \"\"\"Run one full training pass; returns (mean batch loss, accuracy %).\"\"\"\n",
    "        self.model.train()\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        for batch_idx, (data, target) in enumerate(train_loader):\n",
    "            data, target = data.to(self.device), target.to(self.device)\n",
    "\n",
    "            # Standard step: clear grads, forward, loss, backward, update.\n",
    "            self.optimizer.zero_grad()\n",
    "            output = self.model(data)\n",
    "            loss = self.criterion(output, target)\n",
    "            loss.backward()\n",
    "            self.optimizer.step()\n",
    "\n",
    "            running_loss += loss.item()\n",
    "            _, predicted = output.max(1)  # predicted class = argmax over logits\n",
    "            total += target.size(0)\n",
    "            correct += predicted.eq(target).sum().item()\n",
    "\n",
    "        epoch_loss = running_loss / len(train_loader)\n",
    "        epoch_acc = 100. * correct / total\n",
    "\n",
    "        return epoch_loss, epoch_acc\n",
    "\n",
    "    def test(self, test_loader):\n",
    "        \"\"\"Evaluate without gradient tracking; returns (mean batch loss, accuracy %).\"\"\"\n",
    "        self.model.eval()\n",
    "        test_loss = 0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for data, target in test_loader:\n",
    "                data, target = data.to(self.device), target.to(self.device)\n",
    "                output = self.model(data)\n",
    "                test_loss += self.criterion(output, target).item()\n",
    "                _, predicted = output.max(1)\n",
    "                total += target.size(0)\n",
    "                correct += predicted.eq(target).sum().item()\n",
    "\n",
    "        test_loss /= len(test_loader)\n",
    "        test_acc = 100. * correct / total\n",
    "\n",
    "        return test_loss, test_acc\n",
    "\n",
    "    def train(self, train_loader, test_loader, epochs=10):\n",
    "        \"\"\"Train for `epochs` epochs, evaluating after each and logging a table row.\"\"\"\n",
    "        print(f\"{'Epoch':^6} | {'Train Loss':^12} | {'Train Acc':^10} | {'Test Loss':^10} | {'Test Acc':^10}\")\n",
    "        print(\"-\" * 70)\n",
    "\n",
    "        for epoch in range(1, epochs + 1):\n",
    "            train_loss, train_acc = self.train_epoch(train_loader)\n",
    "            test_loss, test_acc = self.test(test_loader)\n",
    "\n",
    "            # Accumulate histories so plot_results() can draw the curves.\n",
    "            self.train_losses.append(train_loss)\n",
    "            self.train_accuracies.append(train_acc)\n",
    "            self.test_losses.append(test_loss)\n",
    "            self.test_accuracies.append(test_acc)\n",
    "\n",
    "            print(f\"{epoch:^6} | {train_loss:^12.4f} | {train_acc:^10.2f}% | {test_loss:^10.4f} | {test_acc:^10.2f}%\")\n",
    "\n",
    "    def plot_results(self, title):\n",
    "        \"\"\"Plot a 2x2 grid: train/test loss and train/test accuracy curves.\"\"\"\n",
    "        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))\n",
    "\n",
    "        # Loss curves\n",
    "        ax1.plot(self.train_losses, label='Training Loss')\n",
    "        ax1.set_title(f'{title} - Training Loss')\n",
    "        ax1.set_xlabel('Epoch')\n",
    "        ax1.set_ylabel('Loss')\n",
    "        ax1.legend()\n",
    "        ax1.grid(True)\n",
    "        ax2.plot(self.test_losses, label='Test Loss', color='orange')\n",
    "        ax2.set_title(f'{title} - Test Loss')\n",
    "        ax2.set_xlabel('Epoch')\n",
    "        ax2.set_ylabel('Loss')\n",
    "        ax2.legend()\n",
    "        ax2.grid(True)\n",
    "\n",
    "        # Accuracy curves\n",
    "        ax3.plot(self.train_accuracies, label='Training Accuracy', color='green')\n",
    "        ax3.set_title(f'{title} - Training Accuracy')\n",
    "        ax3.set_xlabel('Epoch')\n",
    "        ax3.set_ylabel('Accuracy (%)')\n",
    "        ax3.legend()\n",
    "        ax3.grid(True)\n",
    "        ax4.plot(self.test_accuracies, label='Test Accuracy', color='red')\n",
    "        ax4.set_title(f'{title} - Test Accuracy')\n",
    "        ax4.set_xlabel('Epoch')\n",
    "        ax4.set_ylabel('Accuracy (%)')\n",
    "        ax4.legend()\n",
    "        ax4.grid(True)\n",
    "\n",
    "        plt.tight_layout()\n",
    "        plt.show()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "62925ad60aac11e4"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def experiment_basic_mlp():\n",
    "    \"\"\"Experiment 1: train a single-hidden-layer (128-unit, ReLU) MLP baseline.\"\"\"\n",
    "    print(\"=== 实验1: 基础单隐藏层MLP ===\")\n",
    "    loaders = get_data_loaders(batch_size=128)\n",
    "    mlp = BasicMLP(hidden_sizes=[128], activation='relu')\n",
    "    trainer = ModelTrainer(mlp, optim.Adam(mlp.parameters(), lr=0.001), nn.CrossEntropyLoss())\n",
    "    trainer.train(loaders[0], loaders[1], epochs=15)\n",
    "    trainer.plot_results(\"Basic MLP (128 hidden units)\")\n",
    "    return trainer\n",
    "\n",
    "\n",
    "basic_trainer = experiment_basic_mlp()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "85c4006b77336a2b"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def experiment_network_structures(epochs=5):\n",
    "    \"\"\"Experiment 2: compare MLP width/depth variants trained for a few epochs.\n",
    "\n",
    "    Args:\n",
    "        epochs: training epochs per structure (short on purpose — this is a\n",
    "            relative comparison, not a full training run).\n",
    "\n",
    "    Returns:\n",
    "        dict mapping structure name to its final train/test loss and accuracy.\n",
    "    \"\"\"\n",
    "    print(\"=== 实验2: 网络结构影响 ===\")\n",
    "\n",
    "    structures = {\n",
    "        'Small (64)': [64],\n",
    "        'Medium (128)': [128],\n",
    "        'Large (256)': [256],\n",
    "        'Deep (64-32)': [64, 32],\n",
    "        'Deep (128-64)': [128, 64],\n",
    "        'Deep (256-128)': [256, 128]\n",
    "    }\n",
    "\n",
    "    results = {}\n",
    "    train_loader, test_loader = get_data_loaders(batch_size=128)\n",
    "\n",
    "    for name, hidden_sizes in structures.items():\n",
    "        print(f\"\\n--- 测试网络结构: {name} ---\")\n",
    "\n",
    "        model = BasicMLP(hidden_sizes=hidden_sizes, activation='relu')\n",
    "        optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        trainer = ModelTrainer(model, optimizer, criterion)\n",
    "\n",
    "        # Record the last epoch's metrics after the loop rather than the\n",
    "        # original brittle hard-coded `if epoch == 5` check inside it.\n",
    "        for epoch in range(1, epochs + 1):\n",
    "            train_loss, train_acc = trainer.train_epoch(train_loader)\n",
    "            test_loss, test_acc = trainer.test(test_loader)\n",
    "\n",
    "        results[name] = {\n",
    "            'train_acc': train_acc,\n",
    "            'test_acc': test_acc,\n",
    "            'train_loss': train_loss,\n",
    "            'test_loss': test_loss\n",
    "        }\n",
    "\n",
    "        print(f\"{name}: 训练准确率: {train_acc:.2f}%, 测试准确率: {test_acc:.2f}%\")\n",
    "\n",
    "    # Grouped bar chart of train vs. test accuracy per structure.\n",
    "    names = list(results.keys())\n",
    "    train_accs = [results[name]['train_acc'] for name in names]\n",
    "    test_accs = [results[name]['test_acc'] for name in names]\n",
    "\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    x = np.arange(len(names))\n",
    "    width = 0.35\n",
    "\n",
    "    plt.bar(x - width / 2, train_accs, width, label='Training Accuracy', alpha=0.7)\n",
    "    plt.bar(x + width / 2, test_accs, width, label='Test Accuracy', alpha=0.7)\n",
    "\n",
    "    plt.xlabel('Network Structure')\n",
    "    plt.ylabel('Accuracy (%)')\n",
    "    plt.title('Network Structure Comparison')\n",
    "    plt.xticks(x, names, rotation=45)\n",
    "    plt.legend()\n",
    "    plt.grid(True, alpha=0.3)\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return results\n",
    "\n",
    "\n",
    "structure_results = experiment_network_structures()\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "bf44a38c7cda74c0"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def experiment_activation_functions(epochs=10):\n",
    "    \"\"\"Experiment 3: compare ReLU / Sigmoid / Tanh on the same 128-unit MLP.\n",
    "\n",
    "    Args:\n",
    "        epochs: training epochs per activation.\n",
    "\n",
    "    Returns:\n",
    "        dict mapping activation name to its final test accuracy plus\n",
    "        per-epoch test loss/accuracy curves.\n",
    "    \"\"\"\n",
    "    print(\"=== 实验3: 激活函数对比 ===\")\n",
    "\n",
    "    activations = ['relu', 'sigmoid', 'tanh']\n",
    "    results = {}\n",
    "    train_loader, test_loader = get_data_loaders(batch_size=128)\n",
    "\n",
    "    for activation in activations:\n",
    "        print(f\"\\n--- 测试激活函数: {activation} ---\")\n",
    "\n",
    "        model = BasicMLP(hidden_sizes=[128], activation=activation)\n",
    "        optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        trainer = ModelTrainer(model, optimizer, criterion)\n",
    "\n",
    "        epoch_losses = []\n",
    "        epoch_accs = []\n",
    "\n",
    "        for epoch in range(1, epochs + 1):\n",
    "            train_loss, train_acc = trainer.train_epoch(train_loader)\n",
    "            test_loss, test_acc = trainer.test(test_loader)\n",
    "            epoch_losses.append(test_loss)\n",
    "            epoch_accs.append(test_acc)\n",
    "\n",
    "        # Record once after training, not via the original hard-coded `if epoch == 10`.\n",
    "        results[activation] = {\n",
    "            'final_acc': test_acc,\n",
    "            'loss_curve': epoch_losses,\n",
    "            'acc_curve': epoch_accs\n",
    "        }\n",
    "\n",
    "        print(f\"{activation}: 最终测试准确率: {test_acc:.2f}%\")\n",
    "\n",
    "    plt.figure(figsize=(15, 5))\n",
    "\n",
    "    plt.subplot(1, 2, 1)\n",
    "    for activation in activations:\n",
    "        plt.plot(results[activation]['loss_curve'], label=activation.upper())\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel('Test Loss')\n",
    "    plt.title('Activation Functions - Loss Curves')\n",
    "    plt.legend()\n",
    "    plt.grid(True)\n",
    "\n",
    "    plt.subplot(1, 2, 2)\n",
    "    for activation in activations:\n",
    "        plt.plot(results[activation]['acc_curve'], label=activation.upper())\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel('Test Accuracy (%)')\n",
    "    plt.title('Activation Functions - Accuracy Curves')\n",
    "    plt.legend()\n",
    "    plt.grid(True)\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return results\n",
    "\n",
    "\n",
    "activation_results = experiment_activation_functions()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "52978186bc1a87f"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def experiment_optimizers(epochs=10):\n",
    "    \"\"\"Experiment 4: compare SGD / SGD+momentum / Adam / RMSprop on the baseline MLP.\n",
    "\n",
    "    Args:\n",
    "        epochs: training epochs per optimizer.\n",
    "\n",
    "    Returns:\n",
    "        dict mapping optimizer label to its final test accuracy and accuracy curve.\n",
    "    \"\"\"\n",
    "    print(\"=== 实验4: 优化策略对比 ===\")\n",
    "\n",
    "    optimizers_config = {\n",
    "        'SGD (lr=0.01)': {'optimizer': optim.SGD, 'lr': 0.01},\n",
    "        'SGD (lr=0.01, momentum=0.9)': {'optimizer': optim.SGD, 'lr': 0.01, 'momentum': 0.9},\n",
    "        'Adam (lr=0.001)': {'optimizer': optim.Adam, 'lr': 0.001},\n",
    "        'RMSprop (lr=0.001)': {'optimizer': optim.RMSprop, 'lr': 0.001}\n",
    "    }\n",
    "\n",
    "    results = {}\n",
    "    train_loader, test_loader = get_data_loaders(batch_size=128)\n",
    "\n",
    "    for name, config in optimizers_config.items():\n",
    "        print(f\"\\n--- 测试优化器: {name} ---\")\n",
    "        model = BasicMLP(hidden_sizes=[128], activation='relu')\n",
    "\n",
    "        # Pass every config entry except the class itself as keyword arguments;\n",
    "        # this replaces the original duplicated SGD/momentum branching (the\n",
    "        # inner and outer else arms were identical).\n",
    "        opt_kwargs = {k: v for k, v in config.items() if k != 'optimizer'}\n",
    "        optimizer = config['optimizer'](model.parameters(), **opt_kwargs)\n",
    "\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        trainer = ModelTrainer(model, optimizer, criterion)\n",
    "        epoch_accs = []\n",
    "\n",
    "        for epoch in range(1, epochs + 1):\n",
    "            trainer.train_epoch(train_loader)\n",
    "            _, test_acc = trainer.test(test_loader)\n",
    "            epoch_accs.append(test_acc)\n",
    "\n",
    "        # Record once after the loop, not via the original hard-coded `if epoch == 10`.\n",
    "        results[name] = {\n",
    "            'final_acc': test_acc,\n",
    "            'acc_curve': epoch_accs\n",
    "        }\n",
    "\n",
    "        print(f\"{name}: 最终测试准确率: {test_acc:.2f}%\")\n",
    "\n",
    "    plt.figure(figsize=(15, 6))\n",
    "\n",
    "    # Accuracy-over-epochs comparison.\n",
    "    plt.subplot(1, 2, 1)\n",
    "    for name in optimizers_config.keys():\n",
    "        plt.plot(results[name]['acc_curve'], label=name)\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel('Test Accuracy (%)')\n",
    "    plt.title('Optimizers Comparison - Accuracy Curves')\n",
    "    plt.legend()\n",
    "    plt.grid(True)\n",
    "\n",
    "    # Final-accuracy bar chart with value labels above each bar.\n",
    "    plt.subplot(1, 2, 2)\n",
    "    names = list(results.keys())\n",
    "    accs = [results[name]['final_acc'] for name in names]\n",
    "\n",
    "    bars = plt.bar(names, accs, alpha=0.7)\n",
    "    plt.xlabel('Optimizer')\n",
    "    plt.ylabel('Final Test Accuracy (%)')\n",
    "    plt.title('Optimizers Comparison - Final Accuracy')\n",
    "    plt.xticks(rotation=45)\n",
    "\n",
    "    for bar, acc in zip(bars, accs):\n",
    "        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.1,\n",
    "                 f'{acc:.2f}%', ha='center', va='bottom')\n",
    "\n",
    "    plt.grid(True, alpha=0.3)\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return results\n",
    "\n",
    "\n",
    "optimizer_results = experiment_optimizers()"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f6632368edd05075"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "def final_experiment():\n",
    "    \"\"\"Train a deeper MLP with the best settings from the earlier experiments.\n",
    "\n",
    "    Returns:\n",
    "        (trainer, final_test_accuracy) tuple.\n",
    "    \"\"\"\n",
    "    print(\"=== 最终实验: 综合最佳配置 ===\")\n",
    "\n",
    "    # Best-performing configuration from experiments 1-4.\n",
    "    train_loader, test_loader = get_data_loaders(batch_size=128)\n",
    "\n",
    "    model = BasicMLP(hidden_sizes=[256, 128], activation='relu')\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)  # weight_decay adds L2 regularization\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "    trainer = ModelTrainer(model, optimizer, criterion)\n",
    "    trainer.train(train_loader, test_loader, epochs=20)\n",
    "    trainer.plot_results(\"Optimized MLP (256-128 hidden units)\")\n",
    "\n",
    "    # Final evaluation after all training epochs.\n",
    "    final_test_loss, final_test_acc = trainer.test(test_loader)\n",
    "    print(f\"\\n最终模型性能:\")\n",
    "    print(f\"测试损失: {final_test_loss:.4f}\")\n",
    "    print(f\"测试准确率: {final_test_acc:.2f}%\")\n",
    "\n",
    "    return trainer, final_test_acc\n",
    "\n",
    "\n",
    "def error_analysis(model, test_loader):\n",
    "    \"\"\"Collect misclassified test samples and build a 10x10 confusion matrix.\n",
    "\n",
    "    Args:\n",
    "        model: trained classifier returning (N, 10) logits.\n",
    "        test_loader: DataLoader over the evaluation split.\n",
    "\n",
    "    Returns:\n",
    "        (misclassified, confusion_matrix): list of {'image', 'true', 'pred'}\n",
    "        dicts and a (10, 10) tensor of true-vs-predicted counts.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    misclassified = []\n",
    "    confusion_matrix = torch.zeros(10, 10)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for data, target in test_loader:\n",
    "            data, target = data.to(device), target.to(device)\n",
    "            output = model(data)\n",
    "            pred = output.argmax(dim=1)\n",
    "\n",
    "            # Compute the wrong indices once per batch; the original re-ran\n",
    "            # `wrong_idx.nonzero()` on every loop iteration (quadratic per batch).\n",
    "            for idx in (pred != target).nonzero(as_tuple=True)[0].tolist():\n",
    "                misclassified.append({\n",
    "                    'image': data[idx].cpu().squeeze(),\n",
    "                    'true': target[idx].item(),\n",
    "                    'pred': pred[idx].item()\n",
    "                })\n",
    "            for t, p in zip(target.view(-1), pred.view(-1)):\n",
    "                confusion_matrix[t.long(), p.long()] += 1\n",
    "\n",
    "    # Show up to 10 misclassified samples with true vs. predicted labels.\n",
    "    print(f\"\\n错误分类样本数: {len(misclassified)}\")\n",
    "    plt.figure(figsize=(15, 6))\n",
    "    for i in range(min(10, len(misclassified))):\n",
    "        plt.subplot(2, 5, i + 1)\n",
    "        plt.imshow(misclassified[i]['image'], cmap='gray')\n",
    "        plt.title(f'True: {misclassified[i][\"true\"]}, Pred: {misclassified[i][\"pred\"]}')\n",
    "        plt.axis('off')\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return misclassified, confusion_matrix\n",
    "\n",
    "\n",
    "final_trainer, final_accuracy = final_experiment()\n",
    "misclassified_samples, conf_matrix = error_analysis(final_trainer.model, test_loader)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "d0c3e9202b69c784"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
