{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# U-Net"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在图像分割任务中，如何在保持空间分辨率的同时提取深层语义特征成为关键问题。为了解决这一挑战，Ronneberger等人在2015年提出了U-Net（Ronneberger et al., 2015），一种专为图像分割设计的卷积神经网络。U-Net的核心思想是采用“编码器-解码器”结构：编码器逐步提取图像的高级语义特征，解码器逐步恢复空间分辨率。通过跳跃连接（skip connections）将编码器的浅层特征与解码器对应层相连，U-Net有效融合了局部细节和全局语义信息。\n",
    "\n",
    "![alt text](../../image_src/unet.jpg)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_conv(in_channels, out_channels, kernel_size, stride, bias=False):\n",
    "    \"\"\"Build an nn.Conv2d whose padding keeps the spatial size fixed at stride 1.\"\"\"\n",
    "    pad = get_padding(kernel_size, stride)\n",
    "    return nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad, bias=bias)\n",
    "\n",
    "def get_padding(kernel_size, stride):\n",
    "    kernel_size_np = np.atleast_1d(kernel_size)\n",
    "    stride_np = np.atleast_1d(stride)\n",
    "    padding_np = (kernel_size_np - stride_np + 1) / 2\n",
    "    padding = tuple(int(p) for p in padding_np)\n",
    "    return padding if len(padding) > 1 else padding[0]\n",
    "\n",
    "\n",
    "class InputBlock(nn.Module):\n",
    "    \"\"\"Stem block: two 3x3 convolutions lifting the input to the base width.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(InputBlock, self).__init__()\n",
    "        self.conv1 = get_conv(in_channels, out_channels, 3, 1)\n",
    "        self.conv2 = get_conv(out_channels, out_channels, 3, 1)\n",
    "        self.norm = nn.InstanceNorm2d(out_channels, affine=True)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # conv1 -> norm -> relu, then conv2 -> relu (the second conv is not normalized).\n",
    "        out = self.relu(self.norm(self.conv1(x)))\n",
    "        return self.relu(self.conv2(out))\n",
    "\n",
    "\n",
    "class ConvLayer(nn.Module):\n",
    "    \"\"\"Pre-activation unit: InstanceNorm (over the input channels) -> Conv -> ReLU.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, stride):\n",
    "        super(ConvLayer, self).__init__()\n",
    "        self.conv = get_conv(in_channels, out_channels, kernel_size, stride)\n",
    "        # Norm runs before the conv, hence it is sized with in_channels.\n",
    "        self.norm = nn.InstanceNorm2d(in_channels, affine=True)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.relu(self.conv(self.norm(x)))\n",
    "\n",
    "\n",
    "class ConvBlock(nn.Module):\n",
    "    \"\"\"Two stacked ConvLayers; only the first may downsample via its stride.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, stride):\n",
    "        super(ConvBlock, self).__init__()\n",
    "        self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride)\n",
    "        self.conv2 = ConvLayer(out_channels, out_channels, kernel_size, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.conv2(self.conv1(x))\n",
    "\n",
    "\n",
    "class UpsampleBlock(nn.Module):\n",
    "    \"\"\"Decoder stage: 2x bilinear upsample, concat the skip tensor, then ConvBlock.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels, kernel_size):\n",
    "        super(UpsampleBlock, self).__init__()\n",
    "        # After concatenation the channel count is out_channels + in_channels.\n",
    "        self.conv_block = ConvBlock(out_channels + in_channels, out_channels, kernel_size, 1)\n",
    "\n",
    "    def forward(self, x, x_skip):\n",
    "        up = nn.functional.interpolate(x, scale_factor=2, mode=\"bilinear\", align_corners=True)\n",
    "        merged = torch.cat((up, x_skip), dim=1)\n",
    "        return self.conv_block(merged)\n",
    "\n",
    "\n",
    "class OutputBlock(nn.Module):\n",
    "    \"\"\"1x1 convolution projecting decoder features to class logits.\"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(OutputBlock, self).__init__()\n",
    "        self.conv = get_conv(in_channels, out_channels, kernel_size=1, stride=1, bias=True)\n",
    "\n",
    "    def forward(self, input_data):\n",
    "        return self.conv(input_data)\n",
    "\n",
    "\n",
    "class UNet(nn.Module):\n",
    "    \"\"\"2D U-Net (Ronneberger et al., 2015) with optional deep supervision.\n",
    "\n",
    "    Args:\n",
    "        in_channels: channels of the input image.\n",
    "        out_channels: number of output classes (logit channels).\n",
    "        kernels: per-stage kernel sizes; entry 0 belongs to the input stage,\n",
    "            the last entry to the bottleneck.\n",
    "        strides: per-stage strides; a stride of 2 halves the resolution.\n",
    "        filters: channel widths per stage, trimmed to len(strides).\n",
    "        spatial_dims: accepted for caller compatibility; only 2 is supported\n",
    "            because every submodule uses Conv2d/InstanceNorm2d.\n",
    "        deep_supervision: if True, forward() in training mode also returns\n",
    "            logits from two intermediate decoder stages.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_channels=4,\n",
    "        out_channels=3,\n",
    "        # NOTE: defaults are 2D ([h, w]); the previous 3-element defaults\n",
    "        # would crash the 2D convolutions used throughout.\n",
    "        kernels=[[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]],\n",
    "        strides=[[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]],\n",
    "        filters=[64, 128, 256, 512, 768, 1024, 2048],\n",
    "        spatial_dims=2,\n",
    "        deep_supervision=False,\n",
    "    ):\n",
    "        super(UNet, self).__init__()\n",
    "        if spatial_dims != 2:\n",
    "            raise ValueError(\"only spatial_dims=2 is supported\")\n",
    "\n",
    "        self.n_class = out_channels\n",
    "        self.deep_supervision = deep_supervision\n",
    "\n",
    "        self.filters = filters[: len(strides)]\n",
    "\n",
    "        self.input_block = InputBlock(in_channels, self.filters[0])\n",
    "\n",
    "        # Encoder stages between the input block and the bottleneck;\n",
    "        # zip() in get_module_list truncates to the shortest list.\n",
    "        self.downsamples = self.get_module_list(\n",
    "            in_channels=self.filters[:-1],\n",
    "            out_channels=self.filters[1:],\n",
    "            kernels=kernels[1:-1],\n",
    "            strides=strides[1:-1],\n",
    "        )\n",
    "\n",
    "        self.bottleneck = self.get_conv_block(\n",
    "            in_channels=self.filters[-2],\n",
    "            out_channels=self.filters[-1],\n",
    "            kernel_size=kernels[-1],\n",
    "            stride=strides[-1],\n",
    "        )\n",
    "\n",
    "        # BUG FIX: upsample=True was missing, so the decoder was built from\n",
    "        # ConvBlocks whose forward() cannot accept the skip connection.\n",
    "        self.upsamples = self.get_module_list(\n",
    "            in_channels=self.filters[1:][::-1],\n",
    "            out_channels=self.filters[:-1][::-1],\n",
    "            kernels=kernels[1:][::-1],\n",
    "            strides=strides[1:][::-1],\n",
    "            upsample=True,\n",
    "        )\n",
    "\n",
    "        self.output_block = OutputBlock(in_channels=self.filters[0], out_channels=self.n_class)\n",
    "\n",
    "        self.deep_supervision_heads = self.get_deep_supervision_heads()\n",
    "        self.apply(self.initialize_weights)\n",
    "\n",
    "    def get_conv_block(self, in_channels, out_channels, kernel_size, stride):\n",
    "        # Bottleneck builder (this helper was previously undefined,\n",
    "        # making __init__ raise AttributeError).\n",
    "        return ConvBlock(in_channels, out_channels, kernel_size, stride)\n",
    "\n",
    "    def get_output_block(self, decoder_level):\n",
    "        # Auxiliary head for the decoder stage that produces\n",
    "        # filters[decoder_level] channels (previously undefined).\n",
    "        return OutputBlock(self.filters[decoder_level], self.n_class)\n",
    "\n",
    "    def get_deep_supervision_heads(self):\n",
    "        # Heads for decoder_outputs[-2] (filters[1] channels) and\n",
    "        # decoder_outputs[-3] (filters[2] channels), matching forward().\n",
    "        return nn.ModuleList([self.get_output_block(1), self.get_output_block(2)])\n",
    "\n",
    "    def get_module_list(self, in_channels, out_channels, kernels, strides, upsample=False):\n",
    "        # One ConvBlock (encoder) or UpsampleBlock (decoder) per stage.\n",
    "        layers = []\n",
    "        for in_channel, out_channel, kernel, stride in zip(in_channels, out_channels, kernels, strides):\n",
    "            if upsample:\n",
    "                layers.append(UpsampleBlock(in_channel, out_channel, kernel))\n",
    "            else:\n",
    "                layers.append(ConvBlock(in_channel, out_channel, kernel, stride))\n",
    "        return nn.ModuleList(layers)\n",
    "\n",
    "    def initialize_weights(self, module):\n",
    "        # Kaiming-normal init for conv weights; zero any bias present.\n",
    "        name = module.__class__.__name__.lower()\n",
    "        if name in [\"conv2d\", \"conv3d\"]:\n",
    "            nn.init.kaiming_normal_(module.weight)\n",
    "        if hasattr(module, \"bias\") and module.bias is not None:\n",
    "            nn.init.constant_(module.bias, 0)\n",
    "\n",
    "    def forward(self, input_data):\n",
    "        out = self.input_block(input_data)\n",
    "        encoder_outputs = [out]\n",
    "        for downsample in self.downsamples:\n",
    "            out = downsample(out)\n",
    "            encoder_outputs.append(out)\n",
    "        out = self.bottleneck(out)\n",
    "        decoder_outputs = []\n",
    "        for upsample, skip in zip(self.upsamples, reversed(encoder_outputs)):\n",
    "            out = upsample(out, skip)\n",
    "            decoder_outputs.append(out)\n",
    "        out = self.output_block(out)\n",
    "        if self.training and self.deep_supervision:\n",
    "            # Main logits first, then the two auxiliary heads.\n",
    "            out = [out]\n",
    "            for i, decoder_out in enumerate(decoder_outputs[-3:-1][::-1]):\n",
    "                out.append(self.deep_supervision_heads[i](decoder_out))\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke test: build the 2D U-Net and push one random batch through it.\n",
    "# BUG FIX: spatial_dims is not a parameter of UNet.__init__ above, so\n",
    "# passing it raised a TypeError; it has been removed from the call.\n",
    "unet_2D = UNet(\n",
    "    in_channels=4,\n",
    "    out_channels=3,\n",
    "    kernels=[[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]],\n",
    "    strides=[[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]],\n",
    "    filters=[64, 128, 256, 512, 768, 1024, 2048],\n",
    "    deep_supervision=False,\n",
    ")\n",
    "\n",
    "# (batch, channels, height, width)\n",
    "inps_2D = torch.randn((2, 4, 256, 256))\n",
    "\n",
    "out_2D = unet_2D(inps_2D)\n",
    "print(out_2D.size())"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
