{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 卷积",
   "id": "42656d906e84fb79"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 标准卷积",
   "id": "3b6f046f28a4b2f4"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": "import torch\nimport torch.nn as nn",
   "outputs": [],
   "execution_count": 1,
   "id": "8ce516a2495dae22"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-06T06:52:34.679854Z",
     "start_time": "2025-05-06T06:52:34.676607Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build a 4x4 input feature map, then unsqueeze twice to add the\n",
    "# batch and channel dims expected by nn.Conv2d -> (1, 1, 4, 4)\n",
    "input_feat = torch.tensor(\n",
    "    [[4, 1, 7, 5], [4, 4, 2, 5], [7, 7, 2, 4], [1, 0, 2, 4]],\n",
    "    dtype=torch.float32,\n",
    ").unsqueeze(0).unsqueeze(0)\n",
    "print(input_feat)\n",
    "print(input_feat.shape)"
   ],
   "id": "d2b2f3dfd2bb26e9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[[4., 1., 7., 5.],\n",
      "          [4., 4., 2., 5.],\n",
      "          [7., 7., 2., 4.],\n",
      "          [1., 0., 2., 4.]]]])\n",
      "torch.Size([1, 1, 4, 4])\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-06T07:27:34.726778Z",
     "start_time": "2025-05-06T07:27:34.723278Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build a standard 2x2 convolution layer\n",
    "conv2d = nn.Conv2d(\n",
    "    in_channels=1,       # channels of the input feature map\n",
    "    out_channels=1,      # channels of the output feature map\n",
    "    kernel_size=(2, 2),  # kernel size\n",
    "    stride=1,            # stride\n",
    "    padding='same',      # zero-pad so output keeps input's spatial size (requires stride=1)\n",
    "    bias=True\n",
    ")\n",
    "# Weights and bias are randomly initialized, so downstream outputs vary per run\n",
    "print(conv2d.weight)\n",
    "print(conv2d.bias)"
   ],
   "id": "fa6e8733a0287e41",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Parameter containing:\n",
      "tensor([[[[ 0.3148, -0.4401],\n",
      "          [-0.1627,  0.0136]]]], requires_grad=True)\n",
      "Parameter containing:\n",
      "tensor([-0.2332], requires_grad=True)\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-06T06:53:50.575827Z",
     "start_time": "2025-05-06T06:53:50.572003Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Apply the convolution; the bare last expression displays the result\n",
    "out_feat = conv2d(input_feat)\n",
    "out_feat"
   ],
   "id": "f28334f0799b8033",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[-0.8777,  3.6499, -1.9263,  0.1735],\n",
       "          [ 0.5324,  1.5788,  0.7690, -0.2665],\n",
       "          [ 0.2763, -2.8965,  0.3700,  0.1988],\n",
       "          [-0.1651,  1.0982,  0.9657, -1.5609]]]],\n",
       "       grad_fn=<ConvolutionBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 深度可分离卷积",
   "id": "73da623d8c3fd64f"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "DW卷积: 特征图m个通道对应m个3x3卷积核 输出每个通道的特征图\n",
    "PW卷积: 将DW的m个特征图结合 通过n个具有m个通道的1x1卷积核 输出n个通道的特征图(每个通道特征图分别与卷积中对应通道运算并最终求和)"
   ],
   "id": "52f56e4ddd3742c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-06T08:22:16.918306Z",
     "start_time": "2025-05-06T08:22:16.908546Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build a 3-channel 128x128 feature map with a leading batch dim -> (1, 3, 128, 128)\n",
    "x = torch.rand((3, 128, 128)).unsqueeze(0)\n",
    "print(x.shape)  # show the shape only; dumping the full tensor floods the output\n",
    "\n",
    "# Depthwise (DW) convolution: input and output channel counts are identical\n",
    "in_channels_dw = x.shape[1]\n",
    "out_channels_dw = x.shape[1]\n",
    "kernel_size = 3\n",
    "stride = 1\n",
    "# groups equals the input channel count, so each channel gets its own 3x3 kernel\n",
    "dw = nn.Conv2d(in_channels_dw, out_channels_dw, kernel_size, stride, groups=in_channels_dw)\n",
    "\n",
    "# Pointwise (PW) convolution: 1x1 kernels mix the DW output channels into 10 maps\n",
    "in_channels_pw = out_channels_dw\n",
    "out_channels_pw = 10\n",
    "kernel_size_pw = 1\n",
    "pw = nn.Conv2d(in_channels_pw, out_channels_pw, kernel_size_pw, stride)\n",
    "out = pw(dw(x))\n",
    "print(out.shape)"
   ],
   "id": "6e0cd6e8d0784b34",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 3, 128, 128])\n",
      "torch.Size([1, 10, 126, 126])\n"
     ]
    }
   ],
   "execution_count": 5
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
