{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.datasets as datasets\n",
    "import torchvision.models as models\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "import random\n",
    "import time\n",
    "\n",
    "# Training hyperparameters.\n",
    "BATCH_SIZE = 16\n",
    "NUM_CLASSES = 21  # presumably PASCAL VOC (20 classes + background) -- confirm against dataset\n",
    "NUM_EPOCHS = 10\n",
    "LEARNING_RATE = 0.001"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FCNResNet18(nn.Module):\n",
    "    def __init__(self, num_classes=21):\n",
    "        super(FCNResNet18, self).__init__()\n",
    "        self.num_classes = num_classes\n",
    "        \n",
    "        # 加载预训练的ResNet18模型\n",
    "        resnet = models.resnet18(pretrained=True)\n",
    "        \n",
    "        # 移除最后的全连接层和平均池化层\n",
    "        self.features = nn.Sequential(*list(resnet.children())[:-2])\n",
    "        \n",
    "        # 添加1x1卷积层来减少通道数并匹配类别数\n",
    "        \n",
    "        self.conv1x1 = nn.Conv2d(512, num_classes, kernel_size=1)\n",
    "        \n",
    "        # 添加转置卷积层进行上采样(16倍上采样)\n",
    "        # 因为ResNet18下采样了32倍，但我们希望输出与输入大小相同\n",
    "        # 所以需要16倍上采样(FCN-16s结构)\n",
    "        self.upsample = nn.ConvTranspose2d(\n",
    "            num_classes, num_classes, \n",
    "            kernel_size=32, stride=16, padding=8,\n",
    "            bias=False\n",
    "        )\n",
    "        \n",
    "        # 初始化转置卷积层的权重为双线性插值\n",
    "        self._init_upsample()\n",
    "    def bilinear_kernel(self,in_channels, out_channels, kernel_size):\n",
    "        factor = (kernel_size + 1) // 2\n",
    "        if kernel_size % 2 == 1:\n",
    "            center = factor - 1\n",
    "        else:\n",
    "            center = factor - 0.5\n",
    "        og = (torch.arange(kernel_size).reshape(-1, 1),\n",
    "            torch.arange(kernel_size).reshape(1, -1))\n",
    "        filt = (1 - torch.abs(og[0] - center) / factor) * \\\n",
    "            (1 - torch.abs(og[1] - center) / factor)\n",
    "        weight = torch.zeros((in_channels, out_channels,\n",
    "                            kernel_size, kernel_size))\n",
    "        weight[range(in_channels), range(out_channels), :, :] = filt\n",
    "        return weight\n",
    "        \n",
    "        \n",
    "    def _init_upsample(self):\n",
    "        # # 使用双线性插值初始化转置卷积\n",
    "        # kernel_size = self.upsample.kernel_size[0]\n",
    "        # stride = self.upsample.stride[0]\n",
    "        \n",
    "        # # 计算双线性插值核\n",
    "        # factor = (kernel_size + 1) // 2\n",
    "        # if kernel_size % 2 == 1:\n",
    "        #     center = factor - 1\n",
    "        # else:\n",
    "        #     center = factor - 0.5\n",
    "        # og = torch.arange(kernel_size).float()\n",
    "        # filt = (1 - torch.abs(og - center) / factor)\n",
    "        # kernel = filt[:, None] * filt[None, :]\n",
    "        # kernel = kernel / kernel.sum()\n",
    "        \n",
    "        # # 设置权重\n",
    "        # self.upsample.weight.data.copy_(\n",
    "        #     kernel.expand(self.upsample.in_channels, -1, -1, -1)\n",
    "        # )\n",
    "        W = self.bilinear_kernel(self.num_classes,self.num_classes,32)\n",
    "        self.upsample.weight.data.copy_(W)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        input_size = x.size()[2:]\n",
    "        x = self.features(x)\n",
    "        x = self.conv1x1(x)\n",
    "        x = self.upsample(x)\n",
    "        \n",
    "        # 调整大小以确保与输入完全匹配\n",
    "        if x.size()[2:] != input_size:\n",
    "            x = F.interpolate(x, size=input_size, mode='bilinear', align_corners=True)\n",
    "        \n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\hanson\\.conda\\envs\\pytorch\\lib\\site-packages\\torchvision\\models\\_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
      "  warnings.warn(\n",
      "c:\\Users\\hanson\\.conda\\envs\\pytorch\\lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
      "  warnings.warn(msg)\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "# Model, optimizer and loss for a quick smoke test.\n",
    "net = FCNResNet18(num_classes=NUM_CLASSES)\n",
    "optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)\n",
    "# 255 is the conventional 'ignore' label for unannotated pixels.\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=255)\n",
    "\n",
    "# Synthetic batch: images (N, 3, H, W) and integer label maps (N, H, W).\n",
    "# torch.randint returns int64, which is what CrossEntropyLoss expects.\n",
    "X = torch.randn(BATCH_SIZE, 3, 360, 480)\n",
    "Y = torch.randint(0, NUM_CLASSES, (BATCH_SIZE, 360, 480))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Got range [-4.8203177..4.8946996].\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x25da3d2fac0>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# One training step as a smoke test: forward, loss, backward, update.\n",
    "Y_hat = net(X)\n",
    "loss = criterion(Y_hat, Y)\n",
    "optimizer.zero_grad()\n",
    "loss.backward()\n",
    "optimizer.step()\n",
    "\n",
    "# Visualize input, ground truth and prediction side by side.\n",
    "# imshow wants float RGB in [0, 1]; the random input is standard normal,\n",
    "# so rescale it instead of letting imshow clip (the old code triggered a\n",
    "# 'Clipping input data' warning).\n",
    "img = X[0].permute(1, 2, 0)\n",
    "img = (img - img.min()) / (img.max() - img.min())\n",
    "plt.subplot(1, 3, 1)\n",
    "plt.imshow(img.numpy())\n",
    "plt.subplot(1, 3, 2)\n",
    "plt.imshow(Y[0].numpy())  # label map is already (H, W) -- no permute needed\n",
    "plt.subplot(1, 3, 3)\n",
    "# Collapse the class-score channels to a predicted label map; the output\n",
    "# carries grad history, so detach before converting to numpy.\n",
    "plt.imshow(Y_hat[0].argmax(dim=0).detach().numpy());"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
