{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3b5c9b6c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\coding\\Anaconda3\\envs\\pytorch\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "from torch.optim import lr_scheduler\n",
    "from torchvision import datasets, transforms\n",
    "from torch.autograd import Variable\n",
    "from torchvision import datasets, transforms\n",
    "from torchvision.transforms import ToPILImage\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b667b4db",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyResNet18(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MyResNet18, self).__init__()\n",
    "        # 第一层：卷积层3->64 224->112\n",
    "        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3)\n",
    "        self.bn1 = nn.BatchNorm2d(64)\n",
    "        # Max Pooling 层64 112->56\n",
    "        self.s1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        # 第二、三层：“实线”卷积层64 56->56\n",
    "        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn2 = nn.BatchNorm2d(64)\n",
    "        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn3 = nn.BatchNorm2d(64)\n",
    "        # 第四、五层：“实线”卷积层64 56->56\n",
    "        self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn4 = nn.BatchNorm2d(64)\n",
    "        self.conv5 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn5 = nn.BatchNorm2d(64)\n",
    "        # 第六、七层：“虚线”卷积层64->128 56->28\n",
    "        self.conv6_1 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)\n",
    "        self.bn6_1 = nn.BatchNorm2d(128)\n",
    "        self.conv7_1 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn7_1 = nn.BatchNorm2d(128)\n",
    "        #64->128 56->28\n",
    "        self.conv7 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=1, stride=2, padding=0)\n",
    "        self.bn7 = nn.BatchNorm2d(128)\n",
    "        # 第八、九层：“实线”卷积层128->128 28->28\n",
    "        self.conv8 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn8 = nn.BatchNorm2d(128)\n",
    "        self.conv9 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn9 = nn.BatchNorm2d(128)\n",
    "        # 第十、十一层：“虚线”卷积层128->256 28->14\n",
    "        self.conv10_1 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)\n",
    "        self.bn10_1 = nn.BatchNorm2d(256)\n",
    "        self.conv11_1 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn11_1 = nn.BatchNorm2d(256)\n",
    "        #128->256 28->14\n",
    "        self.conv11 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=1, stride=2, padding=0)\n",
    "        self.bn11 = nn.BatchNorm2d(256)\n",
    "        # 第十二 、十三层：“实线”卷积层256->256 14->14\n",
    "        self.conv12 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn12 = nn.BatchNorm2d(256)\n",
    "        self.conv13 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn13 = nn.BatchNorm2d(256)\n",
    "        # 第十四、十五层：“虚线”卷积层256->512 14->7\n",
    "        self.conv14_1 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)\n",
    "        self.bn14_1 = nn.BatchNorm2d(512)\n",
    "        self.conv15_1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn15_1 = nn.BatchNorm2d(512)\n",
    "        #256->512 14->7\n",
    "        self.conv15 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1, stride=2, padding=0)\n",
    "        self.bn15 = nn.BatchNorm2d(512)\n",
    "        # 第十六 、十七层：“实线”卷积层512->512 7->7\n",
    "        self.conv16 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn16 = nn.BatchNorm2d(512)\n",
    "        self.conv17 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)\n",
    "        self.bn17 = nn.BatchNorm2d(512)\n",
    "        # avg pooling 层512->512 7->1\n",
    "        self.s2 = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n",
    "        # 第十八层：全连接层\n",
    "        self.Flatten = nn.Flatten()\n",
    "        #512->1000\n",
    "        self.f18 = nn.Linear(512, 1000)\n",
    "        # 为满足该实例另加 ↓ 1000->10\n",
    "        self.f_output = nn.Linear(1000, 10)\n",
    "        \n",
    "    def forward(self, x):              # shape: torch.Size([1, 3, 224, 224])\n",
    "        x = self.conv1(x)              # shape: torch.Size([1, 64, 112, 112])\n",
    "        x = self.bn1(x)                # shape: torch.Size([1, 64, 112, 112])\n",
    "        x = self.s1(x)                 # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.conv2(x)              # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.bn2(x)                # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.conv3(x)              # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.bn3(x)                # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.conv4(x)              # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.bn4(x)                # shape: torch.Size([1, 64, 56, 56])\n",
    "        x = self.conv5(x)              # shape: torch.Size([1, 64, 56, 56])\n",
    "        \n",
    "        x = self.bn5(x)                # shape: torch.Size([1, 64, 56, 56])        \n",
    "        x6_1 = self.conv6_1(x)         # shape: torch.Size([1, 128, 28, 28])\n",
    "        x7_1 = self.conv7_1(x6_1)      # shape: torch.Size([1, 128, 28, 28])\n",
    "        x7 = self.conv7(x)             # shape: torch.Size([1, 128, 28, 28])\n",
    "        x = x7 + x7_1                  # shape: torch.Size([1, 128, 28, 28])\n",
    "        \n",
    "        x = self.conv8(x)              # shape: torch.Size([1, 128, 28, 28])\n",
    "        \n",
    "        x = self.conv9(x)              # shape: torch.Size([1, 128, 28, 28])\n",
    "        x10_1 = self.conv10_1(x)       # shape: torch.Size([1, 256, 14, 14])\n",
    "        x11_1 = self.conv11_1(x10_1)   # shape: torch.Size([1, 256, 14, 14])\n",
    "        x11 = self.conv11(x)           # shape: torch.Size([1, 256, 14, 14])\n",
    "        x = x11 + x11_1                # shape: torch.Size([1, 256, 14, 14])\n",
    "        \n",
    "        x = self.conv12(x)             # shape: torch.Size([1, 256, 14, 14])\n",
    "        \n",
    "        x = self.conv13(x)             # shape: torch.Size([1, 256, 14, 14])\n",
    "        x14_1 = self.conv14_1(x)       # shape: torch.Size([1, 512, 7, 7])\n",
    "        x15_1 = self.conv15_1(x14_1)   # shape: torch.Size([1, 512, 7, 7])\n",
    "        x15 = self.conv15(x)           # shape: torch.Size([1, 512, 7, 7])\n",
    "        x = x15 + x15_1                # shape: torch.Size([1, 512, 7, 7])\n",
    "        \n",
    "        x = self.conv16(x)             # shape: torch.Size([1, 512, 7, 7])\n",
    "        x = self.conv17(x)             # shape: torch.Size([1, 512, 7, 7])\n",
    "        x = self.s2(x)                 # shape: torch.Size([1, 512, 1, 1])\n",
    "        x = self.Flatten(x)            # shape: shape: torch.Size([1, 512])\n",
    "        x = self.f18(x)                # shape: torch.Size([1, 1000])\n",
    "        # 为满足该实例另加 ↓\n",
    "        x = self.f_output(x)           # shape: torch.Size([1, 10])\n",
    "        x = F.softmax(x, dim=1)        # shape: torch.Size([1, 10])\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "13976b20",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "data_transform = transforms.Compose([\n",
    "    #transforms.(8,8),    # 缩放图像大小为 224*224\n",
    "    transforms.Resize([224,224]),\n",
    "    transforms.ToTensor()     # 仅对数据做转换为 tensor 格式操作\n",
    "])\n",
    "\n",
    "# 加载训练数据集\n",
    "train_dataset = datasets.MNIST(root='./data', train=True, transform=data_transform, download=True)\n",
    "# 给训练集创建一个数据集加载器\n",
    "train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=8, shuffle=True)\n",
    "# 加载测试数据集\n",
    "test_dataset = datasets.MNIST(root='./data', train=False, transform=data_transform, download=True)\n",
    "# 给测试集创建一个数据集加载器\n",
    "test_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=8, shuffle=True)\n",
    "\n",
    "# 如果显卡可用，则用显卡进行训练\n",
    "device = \"cuda\" if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "# 调用 net 里定义的模型，如果 GPU 可用则将模型转到 GPU\n",
    "model = MyResNet18().to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "fe0afeab",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Loss function: cross entropy (expects raw logits plus integer class labels)\n",
     "loss_fn = nn.CrossEntropyLoss()\n",
     "# Optimizer: stochastic gradient descent with momentum\n",
     "optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)\n",
     "# Decay the learning rate by a factor of 0.1 every 10 epochs.\n",
     "# NOTE(review): this rebinds the name of the imported `lr_scheduler` module to a\n",
     "# StepLR instance, so re-running this cell raises AttributeError; consider\n",
     "# renaming the variable (and its use in the training loop) to e.g. `scheduler`.\n",
     "lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d8aac857",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义训练函数\n",
    "def train(dataloader, model, loss_fn, optimizer):\n",
    "    loss, current, n = 0.0, 0.0, 0\n",
    "    for batch, (X, y) in enumerate(dataloader):\n",
    "        # 单通道转为三通道\n",
    "        X = np.array(X)\n",
    "        X = X.transpose((1, 0, 2, 3))             # array 转置\n",
    "        image = np.concatenate((X, X, X), axis=0)\n",
    "        image = image.transpose((1, 0, 2, 3))     # array 转置回来\n",
    "        image = torch.tensor(image)               # 将 numpy 数据格式转为 tensor\n",
    "        # 前向传播\n",
    "        image, y = image.to(device), y.to(device)\n",
    "        output = model(image)\n",
    "        cur_loss = loss_fn(output, y)\n",
    "        _, pred = torch.max(output, axis=1)\n",
    "        cur_acc = torch.sum(y == pred) / output.shape[0]\n",
    "        # 反向传播\n",
    "        optimizer.zero_grad()\n",
    "        cur_loss.backward()\n",
    "        optimizer.step()\n",
    "        loss += cur_loss.item()\n",
    "        current += cur_acc.item()\n",
    "        n = n + 1\n",
    "    print('train_loss：' + str(loss / n))\n",
    "    print('train_acc：' + str(current / n))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "56ac3667",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义测试函数\n",
    "def test(dataloader, model, loss_fn):\n",
    "    # 将模型转换为验证模式\n",
    "    model.eval()\n",
    "    loss, current, n = 0.0, 0.0, 0\n",
    "    # 非训练，推理期用到（测试时模型参数不用更新，所以 no_grad）\n",
    "    with torch.no_grad():\n",
    "        for batch, (X, y) in enumerate(dataloader):\n",
    "            # 单通道转为三通道\n",
    "            X = np.array(X)\n",
    "            X = X.transpose((1, 0, 2, 3))  # array 转置\n",
    "            image = np.concatenate((X, X, X), axis=0)\n",
    "            image = image.transpose((1, 0, 2, 3))  # array 转置回来\n",
    "            image = torch.tensor(image)  # 将 numpy 数据格式转为 tensor\n",
    "            image, y = image.to(device), y.to(device)\n",
    "            output = model(image)\n",
    "            cur_loss = loss_fn(output, y)\n",
    "            _, pred = torch.max(output, axis=1)\n",
    "            cur_acc = torch.sum(y == pred) / output.shape[0]\n",
    "            loss += cur_loss.item()\n",
    "            current += cur_acc.item()\n",
    "            n = n + 1\n",
    "        print('test_loss：' + str(loss / n))\n",
    "        print('test_acc：' + str(current / n))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7de29e2c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\coding\\Anaconda3\\envs\\pytorch\\lib\\site-packages\\torch\\optim\\lr_scheduler.py:138: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`.  Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate\n",
      "  warnings.warn(\"Detected call of `lr_scheduler.step()` before `optimizer.step()`. \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1\n",
      "----------------------\n"
     ]
    }
   ],
   "source": [
    "epoch = 1\n",
    "for t in range(epoch):\n",
    "    lr_scheduler.step()\n",
    "    print(f\"Epoch {t + 1}\\n----------------------\")\n",
    "    train(train_dataloader, model, loss_fn, optimizer)\n",
    "    test(test_dataloader, model, loss_fn)\n",
    "    torch.save(model.state_dict(), \"save_model/{}model.pth\".format(t))    # 模型保存\n",
    "print(\"Done!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c2d93bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "data_transform = transforms.Compose([\n",
    "    transforms.Scale(224),     # 缩放图像大小为 224*224\n",
    "    transforms.ToTensor()      # 仅对数据做转换为 tensor 格式操作\n",
    "])\n",
    "\n",
    "# 加载训练数据集\n",
    "train_dataset = datasets.MNIST(root='./data', train=True, transform=data_transform, download=True)\n",
    "# 给训练集创建一个数据集加载器\n",
    "train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=8, shuffle=True)\n",
    "# 加载测试数据集\n",
    "test_dataset = datasets.MNIST(root='./data', train=False, transform=data_transform, download=True)\n",
    "# 给测试集创建一个数据集加载器\n",
    "test_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=8, shuffle=True)\n",
    "\n",
    "# 如果显卡可用，则用显卡进行训练\n",
    "device = \"cuda\" if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "# 调用 net 里定义的模型，如果 GPU 可用则将模型转到 GPU\n",
    "model = MyResNet18().to(device)\n",
    "# 加载 train.py 里训练好的模型\n",
    "model.load_state_dict(torch.load(\"./save_model/99model.pth\"))\n",
    "\n",
    "# 获取预测结果\n",
    "classes = [\n",
    "    \"0\",\n",
    "    \"1\",\n",
    "    \"2\",\n",
    "    \"3\",\n",
    "    \"4\",\n",
    "    \"5\",\n",
    "    \"6\",\n",
    "    \"7\",\n",
    "    \"8\",\n",
    "    \"9\",\n",
    "]\n",
    "\n",
    "# 把 tensor 转成 Image，方便可视化\n",
    "show = ToPILImage()\n",
    "# 进入验证阶段\n",
    "model.eval()\n",
    "# 对 test_dataset 里 10000 张手写数字图片进行推理\n",
    "for i in range(len(test_dataset)):\n",
    "    x, y = test_dataset[i][0], test_dataset[i][1]\n",
    "    # tensor格式数据可视化\n",
    "    show(x).show()\n",
    "    # 扩展张量维度为 4 维\n",
    "    x = Variable(torch.unsqueeze(x, dim=0).float(), requires_grad=False).to(device)\n",
    "    # 单通道转为三通道\n",
    "    x = x.cpu()\n",
    "    x = np.array(x)\n",
    "    x = x.transpose((1, 0, 2, 3))          # array 转置\n",
    "    x = np.concatenate((x, x, x), axis=0)\n",
    "    x = x.transpose((1, 0, 2, 3))      # array 转置回来\n",
    "    x = torch.tensor(x).to(device)   # 将 numpy 数据格式转为 tensor，并转回 cuda 格式\n",
    "    with torch.no_grad():\n",
    "        pred = model(x)\n",
    "        # 得到预测类别中最高的那一类，再把最高的这一类对应classes中的哪一个标签\n",
    "        predicted, actual = classes[torch.argmax(pred[0])], classes[y]\n",
    "        # 最终输出预测值与真实值\n",
    "        print(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
