{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9a634b72-4626-4700-8bc1-5c947bc868eb",
   "metadata": {},
   "source": [
    "# 第一步导入相关模块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "314879e8-8233-4f56-8670-656c30baf791",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports for inference + visualization (stdlib, torch, torchvision, PIL, matplotlib, numpy).\n",
    "import os\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torchvision import transforms\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "108349c5-4634-4141-a575-bcfbef1fd9b2",
   "metadata": {},
   "source": [
    "# 第二步模型搭建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "0ec4ee90-1867-4bbf-b70e-e82dd3925259",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torchvision import models\n",
    "\n",
    "def convrelu(in_channels, out_channels, kernel, padding):\n",
    "    \"\"\"Return a Conv2d + in-place ReLU block; the basic unit of the U-Net decoder.\"\"\"\n",
    "    return nn.Sequential(\n",
    "        nn.Conv2d(in_channels, out_channels, kernel, padding = padding),\n",
    "        nn.ReLU(inplace = True),\n",
    "    )\n",
    "\n",
    "class ResNetUNet(nn.Module):\n",
    "    \"\"\"U-Net style segmentation network with a pretrained ResNet-18 encoder.\n",
    "\n",
    "    Args:\n",
    "        n_class: number of output channels of the final 1x1 conv (1 for binary masks).\n",
    "    \"\"\"\n",
    "    def __init__(self, n_class):\n",
    "        super().__init__()\n",
    "\n",
    "        # Pretrained backbone; its children() are sliced below into encoder stages.\n",
    "        self.base_model = models.resnet18(pretrained = True)\n",
    "        self.base_layers = list(self.base_model.children())\n",
    "        \n",
    "        # Encoder stages, plus 1x1 convs that adapt each skip connection's channels.\n",
    "        self.layer0 = nn.Sequential(*self.base_layers[:3])\n",
    "        self.layer0_1x1 = convrelu(64, 64, 1, 0)\n",
    "        self.layer1 = nn.Sequential(*self.base_layers[3:5])\n",
    "        self.layer1_1x1 = convrelu(64, 64, 1, 0)\n",
    "        self.layer2 = self.base_layers[5]\n",
    "        self.layer2_1x1 = convrelu(128, 128, 1, 0)\n",
    "        self.layer3 = self.base_layers[6]\n",
    "        self.layer3_1x1 = convrelu(256, 256, 1, 0)\n",
    "        self.layer4 = self.base_layers[7]\n",
    "        self.layer4_1x1 = convrelu(512, 512, 1, 0)\n",
    "\n",
    "        # Decoder: 2x bilinear upsampling, then conv over [upsampled, skip] concat.\n",
    "        self.upsample = nn.Upsample(scale_factor = 2, mode = 'bilinear', align_corners = True)\n",
    "        self.conv_up3 = convrelu(256 + 512, 512, 3, 1)\n",
    "        self.conv_up2 = convrelu(128 + 512, 256, 3, 1)\n",
    "        self.conv_up1 = convrelu(64 + 256, 256, 3, 1)\n",
    "        self.conv_up0 = convrelu(64 + 256, 128, 3, 1)\n",
    "        # Full-resolution branch computed directly on the RGB input; fused at the end.\n",
    "        self.conv_original_size0 = convrelu(3, 64, 3, 1)\n",
    "        self.conv_original_size1 = convrelu(64, 64, 3, 1)\n",
    "        self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)\n",
    "        self.conv_last = nn.Conv2d(64, n_class, 1)\n",
    "\n",
    "    def forward(self, input):\n",
    "        \"\"\"Encode with ResNet stages, decode with upsample + skip fusion; returns raw logits.\"\"\"\n",
    "        # Full-resolution feature branch.\n",
    "        x_original = self.conv_original_size0(input)\n",
    "        x_original = self.conv_original_size1(x_original)\n",
    "\n",
    "        # Encoder path (progressively downsampled).\n",
    "        layer0 = self.layer0(input)\n",
    "        layer1 = self.layer1(layer0)\n",
    "        layer2 = self.layer2(layer1)\n",
    "        layer3 = self.layer3(layer2)\n",
    "        layer4 = self.layer4(layer3)\n",
    "\n",
    "        # Decoder path: upsample, concatenate the matching skip, convolve.\n",
    "        layer4 = self.layer4_1x1(layer4)\n",
    "        x = self.upsample(layer4)\n",
    "        layer3 = self.layer3_1x1(layer3)\n",
    "        x = torch.cat([x, layer3], dim = 1)\n",
    "        x = self.conv_up3(x)\n",
    "\n",
    "        x = self.upsample(x)\n",
    "        layer2 = self.layer2_1x1(layer2)\n",
    "        x = torch.cat([x, layer2], dim = 1)\n",
    "        x = self.conv_up2(x)\n",
    "\n",
    "        x = self.upsample(x)\n",
    "        layer1 = self.layer1_1x1(layer1)\n",
    "        x = torch.cat([x, layer1], dim = 1)\n",
    "        x = self.conv_up1(x)\n",
    "\n",
    "        x = self.upsample(x)\n",
    "        layer0 = self.layer0_1x1(layer0)\n",
    "        x = torch.cat([x, layer0], dim = 1)\n",
    "        x = self.conv_up0(x)\n",
    "\n",
    "        # Back to input resolution; fuse with the full-resolution branch.\n",
    "        x = self.upsample(x)\n",
    "        x = torch.cat([x, x_original], dim = 1)\n",
    "        x = self.conv_original_size2(x)\n",
    "\n",
    "        # Raw logits — no sigmoid here; apply it (or threshold logit > 0) downstream.\n",
    "        out = self.conv_last(x)\n",
    "\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a9bd1aba-6784-4bfe-98e2-10f9ee0898aa",
   "metadata": {},
   "source": [
    "# 第三步数据加载及处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6c792afb-5c14-4570-baf6-b7b9643f2ab2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preprocessing: resize to the training resolution, convert to a CHW float tensor.\n",
    "# (Fixes: module is `transforms`, not `transform`; the trailing comma previously\n",
    "# made `transform` a 1-tuple; `test mask path` and the missing `=` were syntax errors.)\n",
    "transform = transforms.Compose([\n",
    "    transforms.Resize((256, 256)),\n",
    "    transforms.ToTensor(),\n",
    "])\n",
    "\n",
    "# NOTE(review): hardcoded absolute Windows paths — point these at your own data.\n",
    "test_img_path = r\"D:\\Car_segmentation\\data\\imgs\\test\\img_23.jpg\"\n",
    "test_masks_path = r\"D:\\Car_segmentation\\data\\masks\\test\\img_23.jpg\"\n",
    "\n",
    "test_img = Image.open(test_img_path).convert('RGB')\n",
    "test_mask = Image.open(test_masks_path).convert('L')\n",
    "\n",
    "# Add a batch dimension -> (1, 3, 256, 256).\n",
    "test_img_tensor = transform(test_img).unsqueeze(0)\n",
    "print(test_img_tensor.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a35443a5-29ec-4755-8bdc-60a6c6c12ba7",
   "metadata": {},
   "source": [
    "# 第四步，模型加载以及推理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7eb441b-6df6-4f3f-b574-014757c68196",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the trained checkpoint and run a single CPU inference pass.\n",
    "model_path = 'last_model.pth'\n",
    "model = ResNetUNet(n_class=1).cpu()\n",
    "# map_location lets a GPU-trained checkpoint load on a CPU-only machine.\n",
    "model.load_state_dict(torch.load(model_path, map_location='cpu'))\n",
    "model.eval()\n",
    "\n",
    "with torch.no_grad():\n",
    "    output = model(test_img_tensor)\n",
    "    # (1, 1, 256, 256) -> (256, 256) numpy array of raw logits.\n",
    "    pred_mask = output.squeeze().cpu().numpy()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d2211e01-18b4-407b-9589-d542230ed75a",
   "metadata": {},
   "source": [
    "# 第五步，可视化结果展示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5dd5883-4034-4bdb-854a-7e9ad92307ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Side-by-side comparison: input image, ground-truth mask, predicted mask.\n",
    "fig, axes = plt.subplots(1, 3, figsize=(15, 5))\n",
    "\n",
    "axes[0].imshow(np.array(test_img))\n",
    "axes[0].set_title('Original Image')\n",
    "axes[0].axis('off')\n",
    "\n",
    "axes[1].imshow(test_mask, cmap='gray')\n",
    "axes[1].set_title('True Mask')\n",
    "axes[1].axis('off')\n",
    "\n",
    "# The model emits raw logits; logit > 0 is equivalent to sigmoid(logit) > 0.5,\n",
    "# so threshold there and scale to 0/255 for display (casting logits straight to\n",
    "# uint8 would wrap negatives and render garbage).\n",
    "pred_mask_img = Image.fromarray(((pred_mask > 0) * 255).astype(np.uint8))\n",
    "axes[2].imshow(pred_mask_img, cmap='gray')\n",
    "axes[2].set_title('Predicted Mask')\n",
    "axes[2].axis('off')\n",
    "\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
