{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "池化层的作用：降低卷积层对位置的敏感性，同时降低对空间降采样表示的敏感性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from d2l import torch as d2l"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pool2d(X, pool_size, mode='max'):\n",
    "    \"\"\"Apply 2D pooling to a 2-D tensor.\n",
    "\n",
    "    Args:\n",
    "        X: 2-D input tensor of shape (H, W).\n",
    "        pool_size: tuple (p_h, p_w) — height and width of the pooling window.\n",
    "        mode: 'max', 'avg', or 'min'.\n",
    "\n",
    "    Returns:\n",
    "        2-D float tensor of shape (H - p_h + 1, W - p_w + 1).\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if mode is not one of the supported modes.\n",
    "    \"\"\"\n",
    "    # Map each mode to its window reducer. Validating the mode ONCE up\n",
    "    # front replaces the old behavior of printing an error message for\n",
    "    # every output element and silently returning all zeros.\n",
    "    # .float() before .mean() lets 'avg' work on integer tensors too\n",
    "    # (Tensor.mean raises on integral dtypes).\n",
    "    reducers = {\n",
    "        'max': lambda patch: patch.max(),\n",
    "        'avg': lambda patch: patch.float().mean(),\n",
    "        'min': lambda patch: patch.min(),\n",
    "    }\n",
    "    if mode not in reducers:\n",
    "        raise ValueError(f\"unknown pooling mode {mode!r}; expected one of {sorted(reducers)}\")\n",
    "    pool_fn = reducers[mode]\n",
    "    p_h, p_w = pool_size\n",
    "    # Output size for an unpadded, stride-1 window.\n",
    "    Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n",
    "    for h in range(Y.shape[0]):\n",
    "        for w in range(Y.shape[1]):\n",
    "            Y[h, w] = pool_fn(X[h:h + p_h, w:w + p_w])\n",
    "    return Y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "min nonexistence\n",
      "min nonexistence\n",
      "min nonexistence\n",
      "min nonexistence\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[0., 0.],\n",
       "        [0., 0.]])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Try pooling with mode='min' on a 3x3 integer tensor.\n",
    "X = torch.arange(9).reshape(3,3)\n",
    "pool2d(X, (2,2),'min')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[4., 5.],\n",
       "        [7., 8.]])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# mode defaults to 'max'.\n",
    "pool2d(X, (2,2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'pool2d' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-5-8f6507cd1ef3>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[0mX\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m9.0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mpool2d\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m'avg'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m: name 'pool2d' is not defined"
     ]
    }
   ],
   "source": [
    "# Average pooling on a floating-point 3x3 tensor.\n",
    "X = torch.arange(9.0).reshape(3,3)\n",
    "pool2d(X, (2,2),'avg')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 0.,  1.,  2.,  3.],\n",
       "          [ 4.,  5.,  6.,  7.],\n",
       "          [ 8.,  9., 10., 11.],\n",
       "          [12., 13., 14., 15.]]]])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 4-D input in NCHW layout: (batch=1, channels=1, height=4, width=4),\n",
    "# the shape nn.MaxPool2d expects.\n",
    "X = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)\n",
    "X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 5.,  7.],\n",
       "          [13., 15.]]]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Use a distinct name: binding nn.MaxPool2d(2) to `pool2d` would shadow\n",
    "# (and destroy) the pool2d function defined earlier in the notebook.\n",
    "# For nn.MaxPool2d, stride defaults to the window size, so this is a\n",
    "# non-overlapping 2x2 max pool.\n",
    "max_pool2d = nn.MaxPool2d(2)\n",
    "max_pool2d(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 5.,  7.],\n",
       "          [13., 15.]]]])"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 3x3 max pooling with padding 1 and stride 2 on the same 4x4 input.\n",
    "padded_max_pool = nn.MaxPool2d(3, padding=1, stride=2)\n",
    "padded_max_pool(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 1.,  3.],\n",
       "          [ 9., 11.],\n",
       "          [13., 15.]]]])"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rectangular window (2, 4) with per-dimension padding and stride.\n",
    "# NOTE: pool2d3 is reused on the 2-channel input in a later cell,\n",
    "# so this name must stay stable.\n",
    "pool2d3 = nn.MaxPool2d((2, 4), padding=(1, 2), stride=(2, 3))\n",
    "pool2d3(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 0.,  1.,  2.,  3.],\n",
       "          [ 4.,  5.,  6.,  7.],\n",
       "          [ 8.,  9., 10., 11.],\n",
       "          [12., 13., 14., 15.]],\n",
       "\n",
       "         [[ 1.,  2.,  3.,  4.],\n",
       "          [ 5.,  6.,  7.,  8.],\n",
       "          [ 9., 10., 11., 12.],\n",
       "          [13., 14., 15., 16.]]]])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build a 2-channel input by concatenating X and X + 1 along dim 1.\n",
    "# NOTE(review): X = f(X) is not idempotent — re-running just this cell\n",
    "# keeps doubling the channel count; use Restart & Run All to reproduce\n",
    "# the shown output.\n",
    "X = torch.cat((X, X + 1), 1)\n",
    "X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 1.,  3.],\n",
       "          [ 9., 11.],\n",
       "          [13., 15.]],\n",
       "\n",
       "         [[ 2.,  4.],\n",
       "          [10., 12.],\n",
       "          [14., 16.]]]])"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Pooling acts on each channel independently, so the output keeps\n",
    "# both input channels.\n",
    "pool2d3(X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# exercise"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABMgAAAAbCAYAAACJK7OdAAAWFElEQVR4nO2dP2vjyvfGvy9l3sSUJk3YYt3E/JqIFIJbGLYwBBZzIWjhYgKLSRHMQhAXgllYTOCCw4IDARUX3CxKsTjFxSkWBRZUBFQsqFhQ8/yKGdnS6N/Idv6f4gO7jiyfOXNmRvNo5sz/oigCQRAEQRAEQRAEQRAEQbxW/vfYBhAEQRAEQRAEQRAEQRDEY0ICGUEQBEEQBEEQBEEQBPGqIYGMIAiCIAiCIAiCIAiCeNWQQEYQBEEQBEEQBEEQBEG8akggIwiCIAiCIAiCIAiCIF41JJARK+Nd2rBPHHhPwBbi5UJx9tIJ4J7ZsM9cBI9uy8MS/DfB8MSGfWLD+fH49jwNnns8PHf7NfjtY3om4vbpltODc2LDvvSegC0EQRAEQTwXHk0gC3+6GB93Mfy+5n1uHQwP2mhtMTDGsb3bxeBi/kQf2O6J6yEM3kD/W/igv+seMzA2gPvY5SdeNJk4e6R4J6oI4HxogLfHNcVMH+N3DOzdGP6jl+HhCC4tcMbA3xgw9npw/Me36Wmw4Xi4c2BtcbT/eSih5InG828PznEXxhsunpX+sDD8FpR+x7/s5cTmDPYOA2MNtPYMGKezxy9bLi4GjIEdu+vfi8YcgiAIgng1PKxAFvpwzwfo7jTAGANjDIOr1e/nXVpoMQa+28XwYorpdILRYRsNxtB4P3k9K06uh2jvtWF/J4GsFr6D3p6B3qX/+LYQheQJZI8R70QVAZxDA8bBpKYw8EQFhXtFltkcvZ5x6iHi4fsQxp6RfvF2J/p56+Kh+vmnGM8+JvscjBvonozhOGMM3jXAWAuDkn7UP2+DsTbGPxOfXw3AGUfv36fe/25WIFPHnNmpAWNviNmjl5MgCIIgiE3yoAKZf94G22qhezyG+22E9joC2c8JOpyB72dXKwROD5xxWE7521FiPZ69QPZzjDZjaJ+TQPaUefZxRlTwFAWF+2aDk/cXxxrxcDVY+8Xbo9p/X1zbaDIGy0mIWuEUfc7AjopjME8gyxXNniT328ZoXCIIgiCIl8mDCmRhECCM/y/FiVUfZGd/N8GYiVFu3pYZ7LcM7MBJbbUMfjgYyXwv9ukYbmrbQCJvyG8f7vlQXHcywrRw60uI+VeRH2mu/C24GsE+Gaa+G/50MT4t+v34OyO4QfrzyhxMP5x0DpvE//2r8SLHzcjxpD8CeM5Ilm+I8ZWf9cOlhyjw4MR5Rk4nmCt2FT4g+olynjnwgnxb69u2JP+7WT+mrpvG95K5ST520GQMzf1+rt9TpGJC+c1fM4xPbIyuVEFW+vKf2TLudW2/k99N2pWsjxLfJON85HgI7lyM8nIcldVTBv24iNtGspzDcxf+79Wuy8RZSbwnyz4s2Gq9/D3pwx9Odf1X9h95dTSCc6Mn0mvdO6dOlm3FxsiZI0j6LlGu/HawQjyk2kFB35GTkyj5+9l76wsKKT8V2LjReqrVRmIfBZin6iWvjVswGQMzLXFdRZ6k1eJDz1/R3Tztix+KL3Ta16285ka9v+xrHU/bP9l4KIqrRI6pQPRx9oEJxhjMg8SYKf+W6Z8r7UjeX6fPq4jnsjHEn2KYO4ZIG77OE2OIxvh95i76ffFMEyIIAoS/c+zcL17xmRbDhC39/SYYa6LzURmf8tBqZ7pjRQXJ3zpz4AUFAlllveeQbANX4ruWycCYCYvynBEEQRDEi+LxkvSvJZDJB7u9ou0pIfzvU0y/+/KhMoT7yQBnHNudHuyTHjpvuNhecBUq9+ygsyPyc/QOTGxzBsY7mBS8LQ0uumCsCfs6+XkA54CBvbUXy++9r12x9XOni17i963Eg1XRm9nKN5XqW3P5/9augcYbE9ahhbbc1to6HmO0z9HYacM6WOYj6Xz1035420KLN9B6Z8F6byz8ML4tt2tRTtNC/8iCucXAtrqY3G7CtghR5GHyv
gHGGjAP+ugfmJkttcKPBjr7LXD5G2bqXjMM9wwYu9t6OYBuJ+huMbAtE9aRvfjN5epF6bP3k/SDduDAYgzmmVfT9iZaOzJPzE4Xk58RwqsBDM7A33TQO7HR6wjbW8duSnzzzjuyTMtyN3YNNJW2VllPRW1OIy6S5Wy978E+7Ijrdiw4K1yXibOCeG/vd8C3Wuge9hbbuFufkuJkCPe4BRa3w8MuWlscxm6rYkWETv8RIbodi1Wtb0xYR31Yf2yDMw7jU7qOVrp3Bg/jfQ7GtxextM0Z+O4A7i+lre20wN8Y6B4k25oSNzrxcOvA2mFiFfChjd77FhqMofXBkRPsPGEgHfN2fO9UzOgIZCFmf5vCT39IG99wMG5ieB3W86VmPdVvIxGiXy4Gu+l6aTAG/oeNmawXkddJ+I5ttWCU5nFaNT50/JXsV6QvzAYY4zD/TrQbrfblYsAZmidKOa5tNBlHfxpq+ycbD0XxkRBA5HZ5Q9rV2Elsf8tbKaxlh7z/uw46XPRRccyznQFmYUVfmbS3cgyRL/XUMSRe+XUZZOzuHMbjQDJ2k/00A+PbaP1ZIID9EuNT8+/iHGLpZxI5bsY5zHarxk2ddqY7VlQgf0vUZ0/81lZD1FVSINOq9xwSY45ov4bMe/vU87ARBEEQBFGXZyqQzTDgNbYw/BjBZExJ0uthZOY8hLMWBsnEtbfCTuNLwRvCcIoeUyYG8Wfxg6f8/fSkNJCT9eXEfNMCWfNomnjYDjE95PLz9AOqeKCOxbzYD22MUpNj+QCaeIDPruyRfj5L+OqXi8EOA49X861lWwTvzMzYFl4N0GJ8MYkQfhT+DtR7JUVVrS2WIaYfOdjb/lJ4iCKE/4ptvLHPhVDaxeRueU1waaXqs5btH53EW/RlrCYFYe+LmY6Xuwm6jIEfJCdZwUIUWsSHTj1l0I8LYZcygQ/E/ZNtVvc6XYGM74/hLXwWwj1qgrEepvFEVk42U5P/31JoKhPItPoPKYrvDVOrSeefzZKVrrr3ziJiy8AwuWLnZggzKcgWtDURDwlRXyseAkzeC2Eg1Q6+9RMCSFYYCKd9cNZMJ7f+NUWPM/DFxFVDIJNCS+c87adUHq9N1tNKbUT2Y6pgLNtIM9X/a27/WjE+tPwV14OSpsD7pwPOmst2qdm+ZifNVF8dRXKlN4/brq5/VhDIivqGKMrp53XtkPdXrhMxz0pycKn26o0h3hcjM4aIzyw4Qfo+08SqLfFSpCXb8/I5pu+UjWuxwG7BuSu6Zp0tlnrtTHcMKEf2TYpfxNiajI867VMhJ65oiyVBEARBvEyeqUAWv9nVfIDK3V6gPuDIB8vMCWxVE5n4oXU5MQj/7YExY/EQOPvEwXh/OVGPkYJGLL5tWiBTfRuvrFIn62K7quKHHN/OTpqJh/WsXWIylC2n9097OZFayzbxlp1/nCoPsh7GbQZ2KD5f+FF5A53xo2YOsjAIEATKZEj9rqzL7kUsroaYHiaFo/VsD4MAgfqGW/Gl/7WT60P1Oq16yvhBNy6EeJ0tZywixvbpXqcvkPW/5cXUsj3NPvHc8lVO+DT7j8l+dkJY9N16984S1/VQ2dKWipOifEwyVmMBXyseCtvKHKN3BszPs/wYCQNRvtJYqhbI3GOeEFoSseL0xSqWu83W00ptxJ+gk1oxqraR5Pc0BbIV40PHXyKG8sRbuZpJ9km67UuIcsnV1B5Ge4mXR9r+uWeBTNsOeX81R1fluJEjFOuMIVIMXY4hwn+LesisSF76ob94jih6jkkSi2Mm7OvyRPurC2Q67Ux/DChF1mf2JaYSH7XapwIJZARBEATxangdAlkUYZG347ArtrQslshXCUPVE5l4lUQ8MXCPeWKlUtnkT4ojckXCwwhk2fuLz6sFMlX4yxUY33bQj3PexBwkVjqtY5uMGZEzLP0bVmJFhbYf6yTp912MT/uw3onYibdnLr8r35jHq0vkKsLFZGdN26Mozv/TQ1fGb7ydKPZlY
ZykfK5ZTxkfaMZFmU/lakzrMtC/Lq9cK8V7sf26KyLK+w91u9oIk2uvWBiree8Mylah0cUMXoWAmux3lhPv9dqtVozI3Ev9g/aifNupFcBVAlmdpOebqKcV20iZj77106v2aiUQrxsfev4qG1fco8TKXe0+WwhryZXTRrLM2v65Z4FM246COlpBIIuiSGMMkSsD45cq8ncWK9Wk3SK/WpI+Om9jO6vrXqxkrtqiW1TH+kn6K9tZjTGglML6VOqvVvus/g0SyAiCIAjiZfJMBTKdt6RJ5BvTrTZ6ZxNMp1NMp1MM/9yMQJbOvyL+vXxLWfbAmv7bUxfIxH05Bt/z7JLf49uLSVwamatkAwKZyBmWw1/OvQhk8TZI408bY0fEzvRrH4byXSGUipVUQjRarrZb13axjaaB9uEIExm/08/d1QWyqnqqiNXCuCjzafJvutfllevBBTKd/kNyN4dz1kNXTn4ZN9ArTeBc494qMtl0L84FxziMvxKHeRROCJPJuddrt5UxEm9L3O3CPndk+Sbo792HQLapelqxjZT5KPM3XYFslfjYgECW/FuNPts7MxfCmvfFSG+51PbPIwpkqb9tTiDTHUOET8U2S/+8nV7FKG0T+dVyOC1YxanYNtnXf3Za+xTLsnZWYwwoZRMCWVX/RgIZQRAEQbwanqlAJldpJQWIFHKFhDydKTdfT1Qg8KwkkCXyr3y30UxtW4lzceQdKCBzqcl7P3WBTGzJWX4/bVdZOUtsrWOb3GJSmA+u4l6rCWRyu9EHJ70NJO+74RR9zmBd+mLb7WFi68g6tsff/Twv9WU6X03RdZr1lEEzLsrK+X0AHtuhe11eva0U73LLq5IjqSxeYvT6jxxCH87HFhhrYfjfhu+dIYTv9NFiDK3Tealf0v2OZjzI/G3qNruqGBFbpC04v8quq5rUSxsrJvWbq6cV24j0UV5uKrXv1B1XViuTnr9E3eRtK5MiSlwfdfrsHyOYrAn7WqyGSrVvbf/cs0CmbcemBLIaY8hiq/4ck/fK9sMSu8vaYKafSB1gVM7aAllZO6sxBpTyYwQj1y9K/dVqnwokkBEEQRDEq+HZCmTRtY0WYzDzHq7iZNxnydxeHUxSb/1DOB82J5DF+VdMs7lMgiwR+TRameX74dUgdcKXuoUxtnN6+AgCmZo7ZJEIdykwqA+IYutGXk6bAP7dJmyTW1DMnInrnb9InL1ZgUzUP/+knFJ1M8y8/Re/weWpZ4mT29a1XdqZPs0zQuhYaV/KxNzqBEC9TqueMujGRZzMXRWiQrjHybxOutdtSiCL22G23PPPRunkSKv/KMqDczNEq6Sf0+ubsnUh8vuosTTH8P+yKyYsR5kQKifj6cWDm58vSIrCYltdtg91jxkYHyh1PMew1gqy2E9ZG4XtQhTeZD2t1kZknsFMEn8/nRw/0a9UjSurxYeev+L+IrONTfY36mEPen22KGtzvwMz8/u6/slJcn+YE+9yG3v9JP26dmxKIKszhsh+0TRhMmUMkW2NH2ZzdoW+X3DAwXqsLJBptTP9MaAcWZ+qXzLxUad9KpBARhAEQRCvhicpkPkXFox3durEpyzxaWwc5icHcz9AEHiYXdhobykPXVJMa30Ywb0N4N9MMfogTvbbmEAWr1rLfXCewd5hYDsWRlcegiCAdzVEd0s5QSw+hXB/CPc2QODP4Zy0xVHlDy2QMYbWx8nCr9NPJjjjKZEm84AYzsTpU1tdDGU5g9sp7HcNsF0bs8ok/VW2RQi/i5OpGu+lj4IA3lTUuSFPJ9QWyOQb7OahAy/ws0nwowhiNYZIaDxw5vADD7OLAUxeUNdSfMhLkL267Ur8+HNMzyxxQlfKlz4m+0lbfcydpa2L63TqKeMH/bhQ21sQeHBPu2iop+ppXrcpgWxR7kU79OAu/Fgy4dPqPxJxMo196mJ00Cw/KU7r3lmCSwuccZifpvAC6bsvVlrwkH5hvLOoZ/96DGuHge0MlvWsGQ/+V
7HNt3vqit+8dYWtvINJwTbWpZ2ij/auJxj8IU6srSOQxTby3T4mNz6CRGy3PslTSTdZTyu1kWx5A3+OyZEBnsn5pP/iZZX40PJXpm8L4N9M0N/l6dNKa/bZQojOr089/+TE0UUXjHF0ZOz5N46oC9WHsv9tf57BDwIEBQdM6NmxKYGs3hgiXpTljyGiDXIYR3Ef7GN23oPBucx3Wb2CbPa5i+7p7J5XkGm2M60xQJappK3Efonr079xlv1M4nv67VMhpw2IFZhtDK99BHfyIJKfE1h7bdga+d0IgiAIgniaPEmBbH7aKnh7rxJift6DucXkhIGBsQbMwzHmitjhXfZg8OQ1EzincuXIbYT1BbL47XzBg+OvOcZ/GSIPh7adDI13Q0w+t8snQ/chkLVtjBfinLC3rTxU575BDdRycmx3hpgFm7BNEPw3Rm+XL+ucb6NzOqu/giwKMTtZ2lq4mvHXDMN4MsYY+JsOhtMxLMayJ5zFiapPZrn3WtX26NZJfa9h9jC5lCsQ/vHKbT2xsuWrqqcM+nERRRHCG6WcWyZ65/OVrtuYQFbgR/uoesJX3X9EiH57mByaCd8w8N0exjflkyWte+d970Lp+7iR9l28guxz2sd8twdHva9WPGT724bZw2Rxr7w+NMTsNBEvfBud0ynGHxgY61fnPUxy58LubC9tVNrOxuupdhuRbfybfEmTaIP2NzXZeI1xZcX40PFXFAVwU+2ZY7tjw00KujX77OyJvnX9kxcPHhxl/GyfTjDMCCc+Ju/j/k/aViBoVduxwST9dcaQeKVYQWz407TdbMtE70In56nw48hkYDvqSuB81tpiqdnOqscAeZhRaVvJ62fy4kO3fSrkrkycoBvfJ/b3f2KFXN5JmQRBEARBPA8eTyDbMLnHqGdIHjG+eWYnzeUJVEX81rFBXBNobS/YNMoDtpa9xeW81zL8EquwdN6EV99H442v7nX3aHsYBAUr3UpsLUtArF1PK8bFY/i2ph/1c+rotgXpU516qn3vgrLk+U6pd60+UjMetONQue/abTWKEIVVbWfD9bRiX6Y3Jt1/fFT7K+GLBxxzVvKPVlnqxedm66kCnX7uboJu2YmKj2H3Wmi2syLfyK2SWgn7a/Qzm/Hf/T5TEgRBEATx8LwYgezR+TVFjxe/NX8+bDaHCfGQhHBPh3BTK1wCOIe8Rj6XlxwXHiYnY3jJyczvOYZFueGeO1onTxIE8ZTwvpi5h4m8VsRpnj1Ma71wIAiCIAiCWA0SyNblhwP7pIfOG57O6/NseQlCyGvFE9uL+DY6hzbskz4sswHGGuh+XXfLxwuIi18uBrscbMuEdWQv2y03yvPPPFdIICOIZ0IA98xG/8AU+be+aqyWei3czeFWbFMnCIIgCILYFCSQrcv3IYw9A+2DYTpvy7NFPKjbZ275VlHiyeJfjTE87MLYa8M6GmH6cxOTi5cSFwE8Z4T+QRvGXhe90wlmL6Ld5vDDgX1iw6nM5UgQxOPiw/nLgLHXxeAim6uRIAiCIAiCeBhIICMIgiAIgiAIgiAIgiBeNSSQEQRBEARBEARBEARBEK8aEsgIgiAIgiAIgiAIgiCIV83/A0FmmvpnT8RGAAAAAElFTkSuQmCC"
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Exercise: a convolution with the same kernel_size/padding/stride as\n",
    "# pool2d3 above, to compare output shapes (Conv2d additionally needs\n",
    "# in/out channel counts).\n",
    "conv2d = nn.Conv2d(2, 2, kernel_size=(2, 4), padding=(1, 2), stride=(2, 3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on class Conv2d in module torch.nn.modules.conv:\n",
      "\n",
      "class Conv2d(_ConvNd)\n",
      " |  Conv2d(in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]] = 1, padding: Union[str, int, Tuple[int, int]] = 0, dilation: Union[int, Tuple[int, int]] = 1, groups: int = 1, bias: bool = True, padding_mode: str = 'zeros', device=None, dtype=None) -> None\n",
      " |  \n",
      " |  Applies a 2D convolution over an input signal composed of several input\n",
      " |  planes.\n",
      " |  \n",
      " |  In the simplest case, the output value of the layer with input size\n",
      " |  :math:`(N, C_{\\text{in}}, H, W)` and output :math:`(N, C_{\\text{out}}, H_{\\text{out}}, W_{\\text{out}})`\n",
      " |  can be precisely described as:\n",
      " |  \n",
      " |  .. math::\n",
      " |      \\text{out}(N_i, C_{\\text{out}_j}) = \\text{bias}(C_{\\text{out}_j}) +\n",
      " |      \\sum_{k = 0}^{C_{\\text{in}} - 1} \\text{weight}(C_{\\text{out}_j}, k) \\star \\text{input}(N_i, k)\n",
      " |  \n",
      " |  \n",
      " |  where :math:`\\star` is the valid 2D `cross-correlation`_ operator,\n",
      " |  :math:`N` is a batch size, :math:`C` denotes a number of channels,\n",
      " |  :math:`H` is a height of input planes in pixels, and :math:`W` is\n",
      " |  width in pixels.\n",
      " |  \n",
      " |  \n",
      " |  This module supports :ref:`TensorFloat32<tf32_on_ampere>`.\n",
      " |  \n",
      " |  * :attr:`stride` controls the stride for the cross-correlation, a single\n",
      " |    number or a tuple.\n",
      " |  \n",
      " |  * :attr:`padding` controls the amount of padding applied to the input. It\n",
      " |    can be either a string {'valid', 'same'} or a tuple of ints giving the\n",
      " |    amount of implicit padding applied on both sides.\n",
      " |  \n",
      " |  * :attr:`dilation` controls the spacing between the kernel points; also\n",
      " |    known as the à trous algorithm. It is harder to describe, but this `link`_\n",
      " |    has a nice visualization of what :attr:`dilation` does.\n",
      " |  \n",
      " |  * :attr:`groups` controls the connections between inputs and outputs.\n",
      " |    :attr:`in_channels` and :attr:`out_channels` must both be divisible by\n",
      " |    :attr:`groups`. For example,\n",
      " |  \n",
      " |      * At groups=1, all inputs are convolved to all outputs.\n",
      " |      * At groups=2, the operation becomes equivalent to having two conv\n",
      " |        layers side by side, each seeing half the input channels\n",
      " |        and producing half the output channels, and both subsequently\n",
      " |        concatenated.\n",
      " |      * At groups= :attr:`in_channels`, each input channel is convolved with\n",
      " |        its own set of filters (of size\n",
      " |        :math:`\\frac{\\text{out\\_channels}}{\\text{in\\_channels}}`).\n",
      " |  \n",
      " |  The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n",
      " |  \n",
      " |      - a single ``int`` -- in which case the same value is used for the height and width dimension\n",
      " |      - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n",
      " |        and the second `int` for the width dimension\n",
      " |  \n",
      " |  Note:\n",
      " |      When `groups == in_channels` and `out_channels == K * in_channels`,\n",
      " |      where `K` is a positive integer, this operation is also known as a \"depthwise convolution\".\n",
      " |  \n",
      " |      In other words, for an input of size :math:`(N, C_{in}, L_{in})`,\n",
      " |      a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments\n",
      " |      :math:`(C_\\text{in}=C_\\text{in}, C_\\text{out}=C_\\text{in} \\times \\text{K}, ..., \\text{groups}=C_\\text{in})`.\n",
      " |  \n",
      " |  Note:\n",
      " |      In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. See :doc:`/notes/randomness` for more information.\n",
      " |  \n",
      " |  Note:\n",
      " |      ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n",
      " |      the input so the output has the shape as the input. However, this mode\n",
      " |      doesn't support any stride values other than 1.\n",
      " |  \n",
      " |  Args:\n",
      " |      in_channels (int): Number of channels in the input image\n",
      " |      out_channels (int): Number of channels produced by the convolution\n",
      " |      kernel_size (int or tuple): Size of the convolving kernel\n",
      " |      stride (int or tuple, optional): Stride of the convolution. Default: 1\n",
      " |      padding (int, tuple or str, optional): Padding added to all four sides of\n",
      " |          the input. Default: 0\n",
      " |      padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n",
      " |          ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n",
      " |      dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n",
      " |      groups (int, optional): Number of blocked connections from input\n",
      " |          channels to output channels. Default: 1\n",
      " |      bias (bool, optional): If ``True``, adds a learnable bias to the\n",
      " |          output. Default: ``True``\n",
      " |  \n",
      " |  \n",
      " |  Shape:\n",
      " |      - Input: :math:`(N, C_{in}, H_{in}, W_{in})`\n",
      " |      - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where\n",
      " |  \n",
      " |        .. math::\n",
      " |            H_{out} = \\left\\lfloor\\frac{H_{in}  + 2 \\times \\text{padding}[0] - \\text{dilation}[0]\n",
      " |                      \\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor\n",
      " |  \n",
      " |        .. math::\n",
      " |            W_{out} = \\left\\lfloor\\frac{W_{in}  + 2 \\times \\text{padding}[1] - \\text{dilation}[1]\n",
      " |                      \\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor\n",
      " |  \n",
      " |  Attributes:\n",
      " |      weight (Tensor): the learnable weights of the module of shape\n",
      " |          :math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`\n",
      " |          :math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]})`.\n",
      " |          The values of these weights are sampled from\n",
      " |          :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n",
      " |          :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n",
      " |      bias (Tensor):   the learnable bias of the module of shape\n",
      " |          (out_channels). If :attr:`bias` is ``True``,\n",
      " |          then the values of these weights are\n",
      " |          sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n",
      " |          :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel\\_size}[i]}`\n",
      " |  \n",
      " |  Examples:\n",
      " |  \n",
      " |      >>> # With square kernels and equal stride\n",
      " |      >>> m = nn.Conv2d(16, 33, 3, stride=2)\n",
      " |      >>> # non-square kernels and unequal stride and with padding\n",
      " |      >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))\n",
      " |      >>> # non-square kernels and unequal stride and with padding and dilation\n",
      " |      >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))\n",
      " |      >>> input = torch.randn(20, 16, 50, 100)\n",
      " |      >>> output = m(input)\n",
      " |  \n",
      " |  .. _cross-correlation:\n",
      " |      https://en.wikipedia.org/wiki/Cross-correlation\n",
      " |  \n",
      " |  .. _link:\n",
      " |      https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n",
      " |  \n",
      " |  Method resolution order:\n",
      " |      Conv2d\n",
      " |      _ConvNd\n",
      " |      torch.nn.modules.module.Module\n",
      " |      builtins.object\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]] = 1, padding: Union[str, int, Tuple[int, int]] = 0, dilation: Union[int, Tuple[int, int]] = 1, groups: int = 1, bias: bool = True, padding_mode: str = 'zeros', device=None, dtype=None) -> None\n",
      " |      Initializes internal Module state, shared by both nn.Module and ScriptModule.\n",
      " |  \n",
      " |  forward(self, input: torch.Tensor) -> torch.Tensor\n",
      " |      Defines the computation performed at every call.\n",
      " |      \n",
      " |      Should be overridden by all subclasses.\n",
      " |      \n",
      " |      .. note::\n",
      " |          Although the recipe for forward pass needs to be defined within\n",
      " |          this function, one should call the :class:`Module` instance afterwards\n",
      " |          instead of this since the former takes care of running the\n",
      " |          registered hooks while the latter silently ignores them.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from _ConvNd:\n",
      " |  \n",
      " |  __setstate__(self, state)\n",
      " |  \n",
      " |  extra_repr(self)\n",
      " |      Set the extra representation of the module\n",
      " |      \n",
      " |      To print customized extra information, you should re-implement\n",
      " |      this method in your own modules. Both single-line and multi-line\n",
      " |      strings are acceptable.\n",
      " |  \n",
      " |  reset_parameters(self) -> None\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from _ConvNd:\n",
      " |  \n",
      " |  __annotations__ = {'_in_channels': <class 'int'>, '_reversed_padding_r...\n",
      " |  \n",
      " |  __constants__ = ['stride', 'padding', 'dilation', 'groups', 'padding_m...\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  __call__ = _call_impl(self, *input, **kwargs)\n",
      " |  \n",
      " |  __delattr__(self, name)\n",
      " |      Implement delattr(self, name).\n",
      " |  \n",
      " |  __dir__(self)\n",
      " |      Default dir() implementation.\n",
      " |  \n",
      " |  __getattr__(self, name: str) -> Union[torch.Tensor, ForwardRef('Module')]\n",
      " |  \n",
      " |  __repr__(self)\n",
      " |      Return repr(self).\n",
      " |  \n",
      " |  __setattr__(self, name: str, value: Union[torch.Tensor, ForwardRef('Module')]) -> None\n",
      " |      Implement setattr(self, name, value).\n",
      " |  \n",
      " |  add_module(self, name: str, module: Union[ForwardRef('Module'), NoneType]) -> None\n",
      " |      Adds a child module to the current module.\n",
      " |      \n",
      " |      The module can be accessed as an attribute using the given name.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the child module. The child module can be\n",
      " |              accessed from this module using the given name\n",
      " |          module (Module): child module to be added to the module.\n",
      " |  \n",
      " |  apply(self: ~T, fn: Callable[[ForwardRef('Module')], NoneType]) -> ~T\n",
      " |      Applies ``fn`` recursively to every submodule (as returned by ``.children()``)\n",
      " |      as well as self. Typical use includes initializing the parameters of a model\n",
      " |      (see also :ref:`nn-init-doc`).\n",
      " |      \n",
      " |      Args:\n",
      " |          fn (:class:`Module` -> None): function to be applied to each submodule\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> @torch.no_grad()\n",
      " |          >>> def init_weights(m):\n",
      " |          >>>     print(m)\n",
      " |          >>>     if type(m) == nn.Linear:\n",
      " |          >>>         m.weight.fill_(1.0)\n",
      " |          >>>         print(m.weight)\n",
      " |          >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))\n",
      " |          >>> net.apply(init_weights)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 1.,  1.],\n",
      " |                  [ 1.,  1.]])\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 1.,  1.],\n",
      " |                  [ 1.,  1.]])\n",
      " |          Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |          Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |  \n",
      " |  bfloat16(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``bfloat16`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  buffers(self, recurse: bool = True) -> Iterator[torch.Tensor]\n",
      " |      Returns an iterator over module buffers.\n",
      " |      \n",
      " |      Args:\n",
      " |          recurse (bool): if True, then yields buffers of this module\n",
      " |              and all submodules. Otherwise, yields only buffers that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          torch.Tensor: module buffer\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for buf in model.buffers():\n",
      " |          >>>     print(type(buf), buf.size())\n",
      " |          <class 'torch.Tensor'> (20L,)\n",
      " |          <class 'torch.Tensor'> (20L, 1L, 5L, 5L)\n",
      " |  \n",
      " |  children(self) -> Iterator[ForwardRef('Module')]\n",
      " |      Returns an iterator over immediate children modules.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Module: a child module\n",
      " |  \n",
      " |  cpu(self: ~T) -> ~T\n",
      " |      Moves all model parameters and buffers to the CPU.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  cuda(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T\n",
      " |      Moves all model parameters and buffers to the GPU.\n",
      " |      \n",
      " |      This also makes associated parameters and buffers different objects. So\n",
      " |      it should be called before constructing optimizer if the module will\n",
      " |      live on GPU while being optimized.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (int, optional): if specified, all parameters will be\n",
      " |              copied to that device\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  double(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``double`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  eval(self: ~T) -> ~T\n",
      " |      Sets the module in evaluation mode.\n",
      " |      \n",
      " |      This has any effect only on certain modules. See documentations of\n",
      " |      particular modules for details of their behaviors in training/evaluation\n",
      " |      mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n",
      " |      etc.\n",
      " |      \n",
      " |      This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.\n",
      " |      \n",
      " |      See :ref:`locally-disable-grad-doc` for a comparison between\n",
      " |      `.eval()` and several similar mechanisms that may be confused with it.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  float(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``float`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  get_buffer(self, target: str) -> 'Tensor'\n",
      " |      Returns the buffer given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      See the docstring for ``get_submodule`` for a more detailed\n",
      " |      explanation of this method's functionality as well as how to\n",
      " |      correctly specify ``target``.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the buffer\n",
      " |              to look for. (See ``get_submodule`` for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.Tensor: The buffer referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not a\n",
      " |              buffer\n",
      " |  \n",
      " |  get_extra_state(self) -> Any\n",
      " |      Returns any extra state to include in the module's state_dict.\n",
      " |      Implement this and a corresponding :func:`set_extra_state` for your module\n",
      " |      if you need to store extra state. This function is called when building the\n",
      " |      module's `state_dict()`.\n",
      " |      \n",
      " |      Note that extra state should be pickleable to ensure working serialization\n",
      " |      of the state_dict. We only provide backwards compatibility guarantees\n",
      " |      for serializing Tensors; other objects may break backwards compatibility if\n",
      " |      their serialized pickled form changes.\n",
      " |      \n",
      " |      Returns:\n",
      " |          object: Any extra state to store in the module's state_dict\n",
      " |  \n",
      " |  get_parameter(self, target: str) -> 'Parameter'\n",
      " |      Returns the parameter given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      See the docstring for ``get_submodule`` for a more detailed\n",
      " |      explanation of this method's functionality as well as how to\n",
      " |      correctly specify ``target``.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the Parameter\n",
      " |              to look for. (See ``get_submodule`` for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.nn.Parameter: The Parameter referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not an\n",
      " |              ``nn.Parameter``\n",
      " |  \n",
      " |  get_submodule(self, target: str) -> 'Module'\n",
      " |      Returns the submodule given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      For example, let's say you have an ``nn.Module`` ``A`` that\n",
      " |      looks like this:\n",
      " |      \n",
      " |      .. code-block::text\n",
      " |      \n",
      " |          A(\n",
      " |              (net_b): Module(\n",
      " |                  (net_c): Module(\n",
      " |                      (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))\n",
      " |                  )\n",
      " |                  (linear): Linear(in_features=100, out_features=200, bias=True)\n",
      " |              )\n",
      " |          )\n",
      " |      \n",
      " |      (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested\n",
      " |      submodule ``net_b``, which itself has two submodules ``net_c``\n",
      " |      and ``linear``. ``net_c`` then has a submodule ``conv``.)\n",
      " |      \n",
      " |      To check whether or not we have the ``linear`` submodule, we\n",
      " |      would call ``get_submodule(\"net_b.linear\")``. To check whether\n",
      " |      we have the ``conv`` submodule, we would call\n",
      " |      ``get_submodule(\"net_b.net_c.conv\")``.\n",
      " |      \n",
      " |      The runtime of ``get_submodule`` is bounded by the degree\n",
      " |      of module nesting in ``target``. A query against\n",
      " |      ``named_modules`` achieves the same result, but it is O(N) in\n",
      " |      the number of transitive modules. So, for a simple check to see\n",
      " |      if some submodule exists, ``get_submodule`` should always be\n",
      " |      used.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the submodule\n",
      " |              to look for. (See above example for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.nn.Module: The submodule referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not an\n",
      " |              ``nn.Module``\n",
      " |  \n",
      " |  half(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``half`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  load_state_dict(self, state_dict: 'OrderedDict[str, Tensor]', strict: bool = True)\n",
      " |      Copies parameters and buffers from :attr:`state_dict` into\n",
      " |      this module and its descendants. If :attr:`strict` is ``True``, then\n",
      " |      the keys of :attr:`state_dict` must exactly match the keys returned\n",
      " |      by this module's :meth:`~torch.nn.Module.state_dict` function.\n",
      " |      \n",
      " |      Args:\n",
      " |          state_dict (dict): a dict containing parameters and\n",
      " |              persistent buffers.\n",
      " |          strict (bool, optional): whether to strictly enforce that the keys\n",
      " |              in :attr:`state_dict` match the keys returned by this module's\n",
      " |              :meth:`~torch.nn.Module.state_dict` function. Default: ``True``\n",
      " |      \n",
      " |      Returns:\n",
      " |          ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:\n",
      " |              * **missing_keys** is a list of str containing the missing keys\n",
      " |              * **unexpected_keys** is a list of str containing the unexpected keys\n",
      " |      \n",
      " |      Note:\n",
      " |          If a parameter or buffer is registered as ``None`` and its corresponding key\n",
      " |          exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a\n",
      " |          ``RuntimeError``.\n",
      " |  \n",
      " |  modules(self) -> Iterator[ForwardRef('Module')]\n",
      " |      Returns an iterator over all modules in the network.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Module: a module in the network\n",
      " |      \n",
      " |      Note:\n",
      " |          Duplicate modules are returned only once. In the following\n",
      " |          example, ``l`` will be returned only once.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> l = nn.Linear(2, 2)\n",
      " |          >>> net = nn.Sequential(l, l)\n",
      " |          >>> for idx, m in enumerate(net.modules()):\n",
      " |                  print(idx, '->', m)\n",
      " |      \n",
      " |          0 -> Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |          1 -> Linear(in_features=2, out_features=2, bias=True)\n",
      " |  \n",
      " |  named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, torch.Tensor]]\n",
      " |      Returns an iterator over module buffers, yielding both the\n",
      " |      name of the buffer as well as the buffer itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          prefix (str): prefix to prepend to all buffer names.\n",
      " |          recurse (bool): if True, then yields buffers of this module\n",
      " |              and all submodules. Otherwise, yields only buffers that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, torch.Tensor): Tuple containing the name and buffer\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, buf in self.named_buffers():\n",
      " |          >>>    if name in ['running_var']:\n",
      " |          >>>        print(buf.size())\n",
      " |  \n",
      " |  named_children(self) -> Iterator[Tuple[str, ForwardRef('Module')]]\n",
      " |      Returns an iterator over immediate children modules, yielding both\n",
      " |      the name of the module as well as the module itself.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Module): Tuple containing a name and child module\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, module in model.named_children():\n",
      " |          >>>     if name in ['conv4', 'conv5']:\n",
      " |          >>>         print(module)\n",
      " |  \n",
      " |  named_modules(self, memo: Union[Set[ForwardRef('Module')], NoneType] = None, prefix: str = '', remove_duplicate: bool = True)\n",
      " |      Returns an iterator over all modules in the network, yielding\n",
      " |      both the name of the module as well as the module itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          memo: a memo to store the set of modules already added to the result\n",
      " |          prefix: a prefix that will be added to the name of the module\n",
      " |          remove_duplicate: whether to remove the duplicated module instances in the result\n",
      " |          or not\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Module): Tuple of name and module\n",
      " |      \n",
      " |      Note:\n",
      " |          Duplicate modules are returned only once. In the following\n",
      " |          example, ``l`` will be returned only once.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> l = nn.Linear(2, 2)\n",
      " |          >>> net = nn.Sequential(l, l)\n",
      " |          >>> for idx, m in enumerate(net.named_modules()):\n",
      " |                  print(idx, '->', m)\n",
      " |      \n",
      " |          0 -> ('', Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          ))\n",
      " |          1 -> ('0', Linear(in_features=2, out_features=2, bias=True))\n",
      " |  \n",
      " |  named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, torch.nn.parameter.Parameter]]\n",
      " |      Returns an iterator over module parameters, yielding both the\n",
      " |      name of the parameter as well as the parameter itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          prefix (str): prefix to prepend to all parameter names.\n",
      " |          recurse (bool): if True, then yields parameters of this module\n",
      " |              and all submodules. Otherwise, yields only parameters that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Parameter): Tuple containing the name and parameter\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, param in self.named_parameters():\n",
      " |          >>>    if name in ['bias']:\n",
      " |          >>>        print(param.size())\n",
      " |  \n",
      " |  parameters(self, recurse: bool = True) -> Iterator[torch.nn.parameter.Parameter]\n",
      " |      Returns an iterator over module parameters.\n",
      " |      \n",
      " |      This is typically passed to an optimizer.\n",
      " |      \n",
      " |      Args:\n",
      " |          recurse (bool): if True, then yields parameters of this module\n",
      " |              and all submodules. Otherwise, yields only parameters that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Parameter: module parameter\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for param in model.parameters():\n",
      " |          >>>     print(type(param), param.size())\n",
      " |          <class 'torch.Tensor'> (20L,)\n",
      " |          <class 'torch.Tensor'> (20L, 1L, 5L, 5L)\n",
      " |  \n",
      " |  register_backward_hook(self, hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a backward hook on the module.\n",
      " |      \n",
      " |      This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and\n",
      " |      the behavior of this function will change in future versions.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_buffer(self, name: str, tensor: Union[torch.Tensor, NoneType], persistent: bool = True) -> None\n",
      " |      Adds a buffer to the module.\n",
      " |      \n",
      " |      This is typically used to register a buffer that should not to be\n",
      " |      considered a model parameter. For example, BatchNorm's ``running_mean``\n",
      " |      is not a parameter, but is part of the module's state. Buffers, by\n",
      " |      default, are persistent and will be saved alongside parameters. This\n",
      " |      behavior can be changed by setting :attr:`persistent` to ``False``. The\n",
      " |      only difference between a persistent buffer and a non-persistent buffer\n",
      " |      is that the latter will not be a part of this module's\n",
      " |      :attr:`state_dict`.\n",
      " |      \n",
      " |      Buffers can be accessed as attributes using given names.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the buffer. The buffer can be accessed\n",
      " |              from this module using the given name\n",
      " |          tensor (Tensor or None): buffer to be registered. If ``None``, then operations\n",
      " |              that run on buffers, such as :attr:`cuda`, are ignored. If ``None``,\n",
      " |              the buffer is **not** included in the module's :attr:`state_dict`.\n",
      " |          persistent (bool): whether the buffer is part of this module's\n",
      " |              :attr:`state_dict`.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> self.register_buffer('running_mean', torch.zeros(num_features))\n",
      " |  \n",
      " |  register_forward_hook(self, hook: Callable[..., NoneType]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a forward hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time after :func:`forward` has computed an output.\n",
      " |      It should have the following signature::\n",
      " |      \n",
      " |          hook(module, input, output) -> None or modified output\n",
      " |      \n",
      " |      The input contains only the positional arguments given to the module.\n",
      " |      Keyword arguments won't be passed to the hooks and only to the ``forward``.\n",
      " |      The hook can modify the output. It can modify the input inplace but\n",
      " |      it will not have effect on forward since this is called after\n",
      " |      :func:`forward` is called.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_forward_pre_hook(self, hook: Callable[..., NoneType]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a forward pre-hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time before :func:`forward` is invoked.\n",
      " |      It should have the following signature::\n",
      " |      \n",
      " |          hook(module, input) -> None or modified input\n",
      " |      \n",
      " |      The input contains only the positional arguments given to the module.\n",
      " |      Keyword arguments won't be passed to the hooks and only to the ``forward``.\n",
      " |      The hook can modify the input. User can either return a tuple or a\n",
      " |      single modified value in the hook. We will wrap the value into a tuple\n",
      " |      if a single value is returned(unless that value is already a tuple).\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_full_backward_hook(self, hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a backward hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time the gradients with respect to module\n",
      " |      inputs are computed. The hook should have the following signature::\n",
      " |      \n",
      " |          hook(module, grad_input, grad_output) -> tuple(Tensor) or None\n",
      " |      \n",
      " |      The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients\n",
      " |      with respect to the inputs and outputs respectively. The hook should\n",
      " |      not modify its arguments, but it can optionally return a new gradient with\n",
      " |      respect to the input that will be used in place of :attr:`grad_input` in\n",
      " |      subsequent computations. :attr:`grad_input` will only correspond to the inputs given\n",
      " |      as positional arguments and all kwarg arguments are ignored. Entries\n",
      " |      in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor\n",
      " |      arguments.\n",
      " |      \n",
      " |      For technical reasons, when this hook is applied to a Module, its forward function will\n",
      " |      receive a view of each Tensor passed to the Module. Similarly the caller will receive a view\n",
      " |      of each Tensor returned by the Module's forward function.\n",
      " |      \n",
      " |      .. warning ::\n",
      " |          Modifying inputs or outputs inplace is not allowed when using backward hooks and\n",
      " |          will raise an error.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_parameter(self, name: str, param: Union[torch.nn.parameter.Parameter, NoneType]) -> None\n",
      " |      Adds a parameter to the module.\n",
      " |      \n",
      " |      The parameter can be accessed as an attribute using given name.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the parameter. The parameter can be accessed\n",
      " |              from this module using the given name\n",
      " |          param (Parameter or None): parameter to be added to the module. If\n",
      " |              ``None``, then operations that run on parameters, such as :attr:`cuda`,\n",
      " |              are ignored. If ``None``, the parameter is **not** included in the\n",
      " |              module's :attr:`state_dict`.\n",
      " |  \n",
      " |  requires_grad_(self: ~T, requires_grad: bool = True) -> ~T\n",
      " |      Change if autograd should record operations on parameters in this\n",
      " |      module.\n",
      " |      \n",
      " |      This method sets the parameters' :attr:`requires_grad` attributes\n",
      " |      in-place.\n",
      " |      \n",
      " |      This method is helpful for freezing part of the module for finetuning\n",
      " |      or training parts of a model individually (e.g., GAN training).\n",
      " |      \n",
      " |      See :ref:`locally-disable-grad-doc` for a comparison between\n",
      " |      `.requires_grad_()` and several similar mechanisms that may be confused with it.\n",
      " |      \n",
      " |      Args:\n",
      " |          requires_grad (bool): whether autograd should record operations on\n",
      " |                                parameters in this module. Default: ``True``.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  set_extra_state(self, state: Any)\n",
      " |      This function is called from :func:`load_state_dict` to handle any extra state\n",
      " |      found within the `state_dict`. Implement this function and a corresponding\n",
      " |      :func:`get_extra_state` for your module if you need to store extra state within its\n",
      " |      `state_dict`.\n",
      " |      \n",
      " |      Args:\n",
      " |          state (dict): Extra state from the `state_dict`\n",
      " |  \n",
      " |  share_memory(self: ~T) -> ~T\n",
      " |      See :meth:`torch.Tensor.share_memory_`\n",
      " |  \n",
      " |  state_dict(self, destination=None, prefix='', keep_vars=False)\n",
      " |      Returns a dictionary containing a whole state of the module.\n",
      " |      \n",
      " |      Both parameters and persistent buffers (e.g. running averages) are\n",
      " |      included. Keys are corresponding parameter and buffer names.\n",
      " |      Parameters and buffers set to ``None`` are not included.\n",
      " |      \n",
      " |      Returns:\n",
      " |          dict:\n",
      " |              a dictionary containing a whole state of the module\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> module.state_dict().keys()\n",
      " |          ['bias', 'weight']\n",
      " |  \n",
      " |  to(self, *args, **kwargs)\n",
      " |      Moves and/or casts the parameters and buffers.\n",
      " |      \n",
      " |      This can be called as\n",
      " |      \n",
      " |      .. function:: to(device=None, dtype=None, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(dtype, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(tensor, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(memory_format=torch.channels_last)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      Its signature is similar to :meth:`torch.Tensor.to`, but only accepts\n",
      " |      floating point or complex :attr:`dtype`\\ s. In addition, this method will\n",
      " |      only cast the floating point or complex parameters and buffers to :attr:`dtype`\n",
      " |      (if given). The integral parameters and buffers will be moved\n",
      " |      :attr:`device`, if that is given, but with dtypes unchanged. When\n",
      " |      :attr:`non_blocking` is set, it tries to convert/move asynchronously\n",
      " |      with respect to the host if possible, e.g., moving CPU Tensors with\n",
      " |      pinned memory to CUDA devices.\n",
      " |      \n",
      " |      See below for examples.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (:class:`torch.device`): the desired device of the parameters\n",
      " |              and buffers in this module\n",
      " |          dtype (:class:`torch.dtype`): the desired floating point or complex dtype of\n",
      " |              the parameters and buffers in this module\n",
      " |          tensor (torch.Tensor): Tensor whose dtype and device are the desired\n",
      " |              dtype and device for all parameters and buffers in this module\n",
      " |          memory_format (:class:`torch.memory_format`): the desired memory\n",
      " |              format for 4D parameters and buffers in this module (keyword\n",
      " |              only argument)\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |      \n",
      " |      Examples::\n",
      " |      \n",
      " |          >>> linear = nn.Linear(2, 2)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1913, -0.3420],\n",
      " |                  [-0.5113, -0.2325]])\n",
      " |          >>> linear.to(torch.double)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1913, -0.3420],\n",
      " |                  [-0.5113, -0.2325]], dtype=torch.float64)\n",
      " |          >>> gpu1 = torch.device(\"cuda:1\")\n",
      " |          >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1914, -0.3420],\n",
      " |                  [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')\n",
      " |          >>> cpu = torch.device(\"cpu\")\n",
      " |          >>> linear.to(cpu)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1914, -0.3420],\n",
      " |                  [-0.5112, -0.2324]], dtype=torch.float16)\n",
      " |      \n",
      " |          >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.3741+0.j,  0.2382+0.j],\n",
      " |                  [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)\n",
      " |          >>> linear(torch.ones(3, 2, dtype=torch.cdouble))\n",
      " |          tensor([[0.6122+0.j, 0.1150+0.j],\n",
      " |                  [0.6122+0.j, 0.1150+0.j],\n",
      " |                  [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)\n",
      " |  \n",
      " |  to_empty(self: ~T, *, device: Union[str, torch.device]) -> ~T\n",
      " |      Moves the parameters and buffers to the specified device without copying storage.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (:class:`torch.device`): The desired device of the parameters\n",
      " |              and buffers in this module.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  train(self: ~T, mode: bool = True) -> ~T\n",
      " |      Sets the module in training mode.\n",
      " |      \n",
      " |      This has any effect only on certain modules. See documentations of\n",
      " |      particular modules for details of their behaviors in training/evaluation\n",
      " |      mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n",
      " |      etc.\n",
      " |      \n",
      " |      Args:\n",
      " |          mode (bool): whether to set training mode (``True``) or evaluation\n",
      " |                       mode (``False``). Default: ``True``.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  type(self: ~T, dst_type: Union[torch.dtype, str]) -> ~T\n",
      " |      Casts all parameters and buffers to :attr:`dst_type`.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          dst_type (type or string): the desired type\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  xpu(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T\n",
      " |      Moves all model parameters and buffers to the XPU.\n",
      " |      \n",
      " |      This also makes associated parameters and buffers different objects. So\n",
      " |      it should be called before constructing optimizer if the module will\n",
      " |      live on XPU while being optimized.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Arguments:\n",
      " |          device (int, optional): if specified, all parameters will be\n",
      " |              copied to that device\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  zero_grad(self, set_to_none: bool = False) -> None\n",
      " |      Sets gradients of all model parameters to zero. See similar function\n",
      " |      under :class:`torch.optim.Optimizer` for more context.\n",
      " |      \n",
      " |      Args:\n",
      " |          set_to_none (bool): instead of setting to zero, set the grads to None.\n",
      " |              See :meth:`torch.optim.Optimizer.zero_grad` for details.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  T_destination = ~T_destination\n",
      " |  \n",
      " |  dump_patches = False\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(nn.Conv2d)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 0.2407, -0.3575],\n",
       "          [ 2.7207,  1.4058],\n",
       "          [ 6.8278,  4.6107]],\n",
       "\n",
       "         [[-0.5903, -1.0521],\n",
       "          [-4.4631, -1.0524],\n",
       "          [-5.8449,  4.4972]]]], grad_fn=<ThnnConv2DBackward0>)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Forward pass through `conv2d` (an nn.Conv2d constructed in an earlier cell — not shown here);\n",
    "# the ThnnConv2DBackward0 grad_fn in the output confirms it is a learnable convolution, not pooling.\n",
    "conv2d(X)"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABMgAAAAeCAYAAADZ5iIuAAAWT0lEQVR4nO2dT2vbyvfGvy9l3sQsTTahi3oT89tEZCHowtCFIRDMheDCxQSK6aKYQhAXgikUE7jgUFAgoMUFbYqyKM6iOIuiQEGLgBYBLQrePL/FjGxp9G/8J7HTnMUHWseWjs6co5l5NHP0v+l0CoIgCIIgCIIgCIIgCIJ4qfxv0wYQBEEQBEEQBEEQBEEQxCYhgYwgCIIgCIIgCIIgCIJ40ZBARhAEQRAEQRAEQRAEQbxoSCAjCIIgCIIgCIIgCIIgXjQkkBEEQRAEQRAEQRAEQRAvGhLIiKXxryxYpw78LbCFIMqgWP3TCeGdW7DOPYQbt+VpCX/YGJxasE4tOD83b8928Nzj4bnbr8HvAO65iNvtvU4fzqkF68rfAlsIgiAIgngKnlwgC3/Y6B8Z2OUMjO/COOrD/hEufbzozsHguInGDgNjHLv7bfQvJ1s62HokbgYweA29b9GTntf7yMBYH96mr/9FEMJ5VwNvjkjkWYJMrG4oZ4gqlo3zAKO3DOztCMHGr+HpCK864IyBvzJgHHThBJu3aTtYczzcO+jscDT/fSqhZEvj+bcP52MbxisuxltvOhh8Kx+/BVfdnNgcw9pjYKyGxoEB42y8+WvLxUOfMbCP3urHoj6HIAiCIJ4FTyqQRdd9NBhD42gA23XhOkN09zkYa6D/ffFBg3/VQYMx8P02BpcuXNfG8KSJGmOoHdkvR0i4GaB50IS1hA9X4dkLZIGD7oGB7lWweVsqCeGcGDCO7e2aMD0T8gSyTeQMUcWycb6lgsKjIq/ZHL6cvu4p4uH7AMaBgcH3xGf3oq/oXD5VX7GN8RzAPuRg3ED7dATHGaH/tlY5fgsummCsidGvxOfXfXDG0f1v2++/6xXI1D5nfGbAOBhgvPHrJAiCIAgi5gkFsjGs1wzsyE6v7npw0eUM7MRFtMjxftlocQZ+mF1pEDpdcMbRcZZfmUZU8+wFsl8jNBlD8+I5CGTEKjz7WCUq2EZB4bFZ4+T9j2OFeLjugzGG/vUztf+xuLFQZwwdJyFqRS56nIF9KI7BPIEsVzTbSh43x6hfIgiCIIjt4+kEskIxYrmB4PifOhgzMcytuSLFuGMnJcaFPx0MZa0W62wEL7XkP1Hz43cA72Igvnc6hFu4bSXC5KuobTRR/hZeD2GdDlK/jX55GJ0VnT/+zRBemP68sn7STyddfybx/+B6NKtPM3R86Y8QvjOU1zfA6DrI+uHKxzT04cQ1Qs5sTBS7Cgd3QeI6zx34Yb6ti9s2J/+3WT+mvufGx5J1Rd63UGcM9cNert8zcZHyR6L9yq5XI/aiHzasUwujm/TT9MAVMWjfRsjWpFnRLjVmCmJw9v/7gnZRcsX5WSZK68eWOHaIyeyc2XZO5mGynQcXHoLfFbFakjPJthoUbNeen0/64qdTEkO69yBJ0jenQzi3ekK/1rFz2sRP+XiCMOm7xHXl59ICuT9r12TMFNx/1JpEqd/kxYJ+P5Ly0xL5unA76fhkwdgXedmByRiY2RHfq6iTlGw/bTs0/TW9n6R9od4HdPLrTn7nVj2+vF87vrZ/svFQVNMrUWMq9IRdxyYYYzCPE/2u/NvwWrmuSjuSx9e451XFc1keBC4GeTbGNnydJB5CaowBzj2E99In5x7C3xHCMET0O8fOw+IVn2kxTNjSO6yDsTpa763q+6ZWnlX3AVokz3XuwA8LBDLtvqkgB67FbzsmA2MmOlTnjCAIgiC2hi0o0u9jeMDAF3pCJwdlB0VbSyIE31243wM5IIzgfTLAGcduqwvrtIvWK7m18zpSjtlCa0/U1ugem7JWWgt2wZPO8LINxuqwbpKfh3COGdhra7Z03v/aFls/99roJs7fSQyKip6qVj5lVJ94y/839g3UXpnonHTQ3KuJzz6OMDzkqO010Tme1xJpfQ3SfnjdQIPX0
HjbQWdWM66F0V25XbPrNDvofejA3GFgO23Yd+uwTcSLfVQDYzWYxz30js3MllrhRwOtwwa4PIeZOtYYgwMDxv6uRv2e2B8GjJ0aGkdddBP+GF700eC7MI466LxtoMYY2F4f42gei9Wx54tz7M3jZXrvoMMZ+EzkVSdMK9pVsEpCjUHx/zqM/ZqI3ZNEu3wZob8n6v51ZnUAy7bb6MfW9MFDf5+D8d1UO/M3FsYP09x4aBx1YZ20xPH2OnDKYrUgZ5qHLfCdBtonXbTjuPw0TkwsI3gfG2BxLp+00djhMPYbFSsidOJgiundSKyMfWWi86GHzptdcMZhfPJKVthqHjvn3js6TPt4lzPw/T68h7RfGnsN8FcG2sfJfE3bVJn70ymmdw46ewxsp4H2iYXukYjNxjsnHddJYeDORnuHge2Y6Hyw5rGQWj2sI5BFGP9jCj+9kTa+4mDcxGAmTq+3nbR8oqIR+6Kuk8zrnQaM0jpO6XumFduh5txS/hKlE4ykL8waGOMw/0nkjVZ+eehzhvqpch03FuqMo+dGC9wbCu6XmfhICCByy70h7artJba/5T3g07JDHv9tCy0e36fz+omCe+VCeVCwSj9e+XUVZuxunVjottTYTd6nRa3Yxl8FAtiDgw5jqP9TXEMs3afIvjeuYbZfUTtPK8/0+oBK5LlEe3bFuXZqoq2S41Ptvkkh0eeI/DVkn7ntddgIgiAI4mWxcYEs+q8LzhqKwFTFGH2+wKqzn0OYjCkFdn0MzZwBNGugnyw6eycGxsaXgqd7kYsuUwb18WfxoFGePz2hDOVEWxUj1ieQ1T+4iYFyBPeEy8/Tg0sxGI7FmdgPTQxTE1s5eEwMvrOrcqSfzxO+evDQ30sIPSvZNoV/bmZsE7Xt+GwCIPwo/B2qx0qKqlpbLAv8Ia+VsWZqkhl966UnI1qxN48z84uP6TSC96EOxjtw7hU7MvG6pF0LCWRKm8b2q9f14KGXs3Kz0peZ2JLxoE7g5ffqiTzyv5hZ8SIUMZf0r65Axg9H8GcrD2Q7sC7ceCIrJ5upyf9vKTSVCWRacSCF9YNBakXq5LNZslp2gRhTCK86YMzAILli53YAkzGYcXsX5Ku4dyUeDOjk/jSEfSSEAS8xkRSxGQsgapxHcN9zsNe99G/+E9vo5/GrIZBJoaV1kfZTqo7XOttJyycq+rGvu/0rcnvgrJ4uDi5LG5Q+mNLxV3wcpdSB/28LnNXneamZX+PTeup+P53K1eI8zl1d/ywhkBXdG6bTnL5C1w55fOV78f24uAbXcnngfzHAWBv2/fw74rMOnDB9HDexasu/aCXGYfOxUM8p6xtjgT3ZT2VZfoulXp7p9gHlyHuT4pe4bu48PhbJT4WcuKItlgRBEASxfWxWIJODCnUlQjXxU1nNwU/u1gB1cCIHhZm3p1VNQuIB53xQH/3XBWPGbAA3/sTBeG8+yY65t9FOiG/rFsjyxQ8jM9EW21UVP+T4dnxaTwy0s3aJiUz2Ov1/m/NJ0Eq2iSfk/L1ar87HqDmvYzfzo/L0OOPHRQSyoomVWjsvFE/UZ8fUij3ppy+mGHhfWSnBL9+OFe1aSCArismu0tZyQlOYl5qxFdhoJUWazPfi8wqhPBsP8crOeTzpCmS9b+X+GH/iOdetMeHTvAfZh9kJYdFvFzt2luBrKyuQTaeIwhChsoIsU49J3rvihwBauV+YbxMM3xowP49zYyQKQ4ShIiRkjlUtkHkfeUJoScSK0xOrWO7X205aPlHt1I79RK5Xrb6OQmGXZi4u4i8RQ3nirVzNFN+HNPNLiHLJFdlihfnsAZS2fx5ZINO2Qx5frdFV2fcsmQdSlG1fxn2H8N+sHWQ/kLXbQ282FikaCyWJxTET1k15of3lBTKdPNPvA0qR7Zl9EKrEx0L5qUACGUEQBEE8CzYnkMVP0nOK7FezoEA2nWJWc+OkLbajzJa3VwlD1ZOQ+Al9PKj3PvLESqWyiYgUduRqgqcRy
LLHF59XC2Sq8JcrML5uoRfXq4k5NufnXcU2ORkQNcPS5+gkVnho+3EdAlkmLsTn6WNWxd48HuKVWdkVJitM+PLsWotAlo1J7+NyAlkqtsqKZH/rzVculbWfXJEXi4y6Alm5P4rt110RURUH6e1qQ9g3frEwtuCxMyhbhYaXY/jqFqHCtkhOvFfLfa0YCTyMznrovBXXFm+P1hfIFql1uY520vSJem7d2C/N9Rxk7arecXN2Tbulq7D1/FXWN3kfEqt/te/7QlhLrr42ktes7Z9HFsi07ShooyUEMr08kH1IvBpXnme2Uk3aLeqrJemh9Tq2s7rtxUruqi3cRW2sX6S/Ms8W6ANKKWxPpf0Wys/qc5BARhAEQRDbx2YEslgc209vtdFH5wlnEvm0c6eJ7rkN13Xhui4Gf61HIEvXThH/nj9hLBtspv+27QKZOC5H/3ueXfJ3fHc2AUsj64ysQSATNcNy+NvZQoFMJ/ZiIngf68hsXcy1488SyFKxVTYJSf6trP2Uv21eIFsgDu4ncM67aMvJL+MGuqUFnBeJMQVZbHpWv45xGH8nXghS2BbJ4tyr5X5VjMRbqI2/LIwccW3u1x6MRxHI1tVOmj4pi+/Kv2kKZLP+tg3rwpHXZKN38MgCWfJvC9z3/XNzJqz5X4z0lktt/2xQIEv9bX0CmV4exD4V2yyDi2Z6FaO0TdRXy+EsfxWnapt9qD/+WvktlmV5tkAfUMo6BLKq+xsJZARBEATxLNiAQCYnIHvLimMC7yNHcrufeo7hAZu9WSm31s60QOBZSiBL1E75bqGe2nIS19HIe6GArKUmj73tApnYTjP/fdqususssXUR2+T2kMJ6cBXH2oRAphd7gui7mAC1DvO2iP7ZAlkqtmSdr7z6PKnvlcXD9z544vrWI5BFcE9YpkZSmZ9iFomDFFEA530DjDUw+LHmY2eIEDg9NBhD42xS6pf0vUsz92W7qtvsymNEbtV756S3UC28xVLaWDGpX187afqkwEeVsT+dQrtv+kduX071t+vxlzh23rYyKaLEx1/kvv9zCJPVYd2I1VCp/Nb2zyMLZNp2rEsg082D6Wz7c/tyAvtI2X5YYrd+bKgvQSpnZYGsLM8W6ANK+TmEkesXpf0Wyk8FEsgIgiAI4lnwxAJZCPd9o/otXjrcWGiwuKh59m/1RJ0IMRhrwU49sY/gvFufQBbXTjHN+ryAsUTUwsi+iCC67qfezqVuYYztdE82IJCpdT9mRWzn4oA6uBPbLvLq0YQI7tdhm9w+YuZMOu+D2ZbEbRLI9GJvimk0lluObQSJouDFb+lbUSArmBCIgs6PLJBVxpasNVe0zXTW/nHRd1WwkivxEisn1iOQxbmcjfHJZ6N0cqQVB0V1cG4HaJRM9LRjTPGlqO+j5tIEg//LrpjoOMqEUHkznlbuy9W1mXpBkYsej7fVqXEt4pZ/Ut7wdjtYcAVZ7KesjcJ28bBlne2k5xMV3dgvy/WcnOR9JUcmGJSuINPzV1zIP7ONTd5b1Zc96N33xbXWD1swM+fX9U9OkfuTnHiXL9RZvEi/rh3rEsh080DEl33EwEwTZvLtn4lc42qNyukUURAUvOBgNZYWyLTyTL8PKEe2p+qXTHwskp8KJJARBEEQxLPgCQWy+M1nu+icu7OtK0nGv8RALrjswHhrVawwi4/HYX5yMAlChKGP8aWF5o4yYJJiWuPdEN5diODWxfBdA4ytUSCLV63lDnrHsPYY2F4Hw2sfYRjCvx6gvaPUYJNPfvnhAN5diDCYwDltiteMP7VAxhga7+2ZX91PJjjjaH2dX1tmcCdFHrbTxkBeZ3jnwnpbA9u3xCvtV7ItXmXFUDuSPgpD+K5oc0O+WVBb1JFPn+snDvwwmBcmT7GiEKUVexHGnxrpVWPyd/OtlmsWyOIVCXs92LcBwtCHd96FwdnjC2QasRVedcCTuR1MYH8wwNW6N4p/w9CHd9ZGTXn73roEslmMz3LZh3feE
W86K5vwacVBCOdYFL7uu3H+eBgeq280XebYWeY+duGH0ndfOmnBQ/qF8dYsp4ObETp7DGyvL3JaN/enYpUFZzW0zzxxzjtP2MpbsHNX/iR84kwQhD7Gl32YXL3XakzqYxF6P475ABNHHKvxSb6VdJ3tpOmT4napiH3Nvkk9nn9jo/9GvDV4ZX9l2idEcGujt6+sEl/wvi+E6Hz79PyTjQdxTI6WjL3g1hFtofpQir/Nz2MEYYiw4AUTenasSyDTzQOBeNjGcl+yIHKQw/gQ34MDjC+6MDiXxf2rV5CNP7fRPhs/8goyzTzT6gPkNZXkSuyXuD2DW2eeJ4nf6eenQk4OiBWYTQxuAoT38kUav2x0DpqwNOq7EQRBEASxfp5QIJMDxRLiQd7krFHw5F0lwuSiC3MneZwazJMRJorY4V/Fk//4OzacMyMhSqwqkMVP1gsGfQ8TjP42RA0NbTsZam8HsD+nRaIMjyGQNS2MZuKcsLepDIhzRZJQvU6O3dYA43AdtgnCHyN09/m8zfkuWmfjxVeQTSOMT+e25q/QWb1If1Xsxa+ST6+GjOuRFcXn6nZFNwMhJsvr5/tdOP/2Hl8g04it6XSK8JuVtu9VC9a3bMHl6FaJhx0T3YtJeayukjN3Tup8NbML60P1hK/6HjTF9LcP+8RM+Ea0y+i2fLKkdey8310q909upH0XryD7nPYx3+/CUY9blfsyrtV7ds3sJlYU58T1wxiDWMiQcTBwR+gwlng7oOaql3sPVmt3bqNy71h7O2n5JIte7Ov2TRHGZ4l847tonbkYvWNgrFe+ekXDX9NpCC+Vzxy7LQteUtBd8L4/3yaYX2C92j958eDDUfrg5pmNQUY4CWAfxfEmbSsQtKrtWGORfq08kMQrxQpiI3DTdrMdE91Lnbqpwo9Dk4HtqSuB81lpi6VmnlX3AfKFSKW5kpcnefGh3zelyF2ZaKMdHyf29w+xQi7vTZkEQRAEQTw+m3uL5ZrJfQV6huTrwdfP+LQ+f3tUEb91bBDfCbW2BqwbZXCsZW/xdT7qNTyIFWQ6T7Grj/PYT2sfN/ZWIQrDgtVz62a52NLL7adqx3y/6dfU0Y0DmT8LtcvyMVboY2VSp9UWmrm/cNyts32jqnvHmttpyfuhduzrEOfbo/gr4Ysn7LeW8o/WtSwWn2ttpyp08uDeRrvsjYqbsHslNPOsyDdyq6RWwf4F8mQ9/tvesQFBEARBvET+GIFs4zy46PLiJ97Ph/XWHyGIOX9CbPmwT0fwk5OZ3xMMimrjPXe03jxJEMQ24X8xc18m8lIRb/Pswn2SB0EEQRAEQTxnSCBblZ8OrNMuWq94uibPs+VPEDGI7eQPiK0HD/19DrZjovPBmuc+N8rrzzxXSCAjiGdCCO/cQu/YFPW3vmqslnop3E/gVWxTJwiCIAiCmE5JIFud7wMYBwaax4N0zZVnixhkW+de+VZRgliYPyW2QvjOEL3jJoyDNrpnNsZ/RO7n8NOBdWrBqawHSRDEZgng/G3AOGijfzlZvfQAQRAEQRDEC4QEMoIgCIIgCIIgCIIgCOJFQwIZQRAEQRAEQRAEQRAE8aIhgYwgCIIgCIIgCIIgCIJ40ZBARhAEQRAEQRAEQRAEQbxoSCAjCIIgCIIgCIIgCIIgXjQkkBEEQRAEQRAEQRAEQRAvmv8HHk93a82UoMQAAAAASUVORK5CYII="
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[[ 5.,  6.,  7.,  7.],\n",
       "          [ 9., 10., 11., 11.],\n",
       "          [13., 14., 15., 15.],\n",
       "          [13., 14., 15., 15.]],\n",
       "\n",
       "         [[ 6.,  7.,  8.,  8.],\n",
       "          [10., 11., 12., 12.],\n",
       "          [14., 15., 16., 16.],\n",
       "          [14., 15., 16., 16.]]]])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): the name `conv2d` is misleading — this is a max-pooling layer, not a convolution,\n",
    "# and it has no learnable parameters. (Not renamed here: later cells may reference `conv2d`,\n",
    "# and `pool2d` is already used for the hand-written function above.)\n",
    "# 3x3 window with stride 1 and padding 1 preserves the spatial size (4x4 in -> 4x4 out);\n",
    "# MaxPool2d pads with -inf, so padded positions never win the max.\n",
    "conv2d = nn.MaxPool2d((3, 3), padding=(1, 1), stride=(1, 1))\n",
    "conv2d(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on class MaxPool2d in module torch.nn.modules.pooling:\n",
      "\n",
      "class MaxPool2d(_MaxPoolNd)\n",
      " |  MaxPool2d(kernel_size: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...], NoneType] = None, padding: Union[int, Tuple[int, ...]] = 0, dilation: Union[int, Tuple[int, ...]] = 1, return_indices: bool = False, ceil_mode: bool = False) -> None\n",
      " |  \n",
      " |  Applies a 2D max pooling over an input signal composed of several input\n",
      " |  planes.\n",
      " |  \n",
      " |  In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,\n",
      " |  output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`\n",
      " |  can be precisely described as:\n",
      " |  \n",
      " |  .. math::\n",
      " |      \\begin{aligned}\n",
      " |          out(N_i, C_j, h, w) ={} & \\max_{m=0, \\ldots, kH-1} \\max_{n=0, \\ldots, kW-1} \\\\\n",
      " |                                  & \\text{input}(N_i, C_j, \\text{stride[0]} \\times h + m,\n",
      " |                                                 \\text{stride[1]} \\times w + n)\n",
      " |      \\end{aligned}\n",
      " |  \n",
      " |  If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides\n",
      " |  for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.\n",
      " |  It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.\n",
      " |  \n",
      " |  Note:\n",
      " |      When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding\n",
      " |      or the input. Sliding windows that would start in the right padded region are ignored.\n",
      " |  \n",
      " |  The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n",
      " |  \n",
      " |      - a single ``int`` -- in which case the same value is used for the height and width dimension\n",
      " |      - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,\n",
      " |        and the second `int` for the width dimension\n",
      " |  \n",
      " |  Args:\n",
      " |      kernel_size: the size of the window to take a max over\n",
      " |      stride: the stride of the window. Default value is :attr:`kernel_size`\n",
      " |      padding: implicit zero padding to be added on both sides\n",
      " |      dilation: a parameter that controls the stride of elements in the window\n",
      " |      return_indices: if ``True``, will return the max indices along with the outputs.\n",
      " |                      Useful for :class:`torch.nn.MaxUnpool2d` later\n",
      " |      ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape\n",
      " |  \n",
      " |  Shape:\n",
      " |      - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`\n",
      " |      - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where\n",
      " |  \n",
      " |        .. math::\n",
      " |            H_{out} = \\left\\lfloor\\frac{H_{in} + 2 * \\text{padding[0]} - \\text{dilation[0]}\n",
      " |                  \\times (\\text{kernel\\_size[0]} - 1) - 1}{\\text{stride[0]}} + 1\\right\\rfloor\n",
      " |  \n",
      " |        .. math::\n",
      " |            W_{out} = \\left\\lfloor\\frac{W_{in} + 2 * \\text{padding[1]} - \\text{dilation[1]}\n",
      " |                  \\times (\\text{kernel\\_size[1]} - 1) - 1}{\\text{stride[1]}} + 1\\right\\rfloor\n",
      " |  \n",
      " |  Examples::\n",
      " |  \n",
      " |      >>> # pool of square window of size=3, stride=2\n",
      " |      >>> m = nn.MaxPool2d(3, stride=2)\n",
      " |      >>> # pool of non-square window\n",
      " |      >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))\n",
      " |      >>> input = torch.randn(20, 16, 50, 32)\n",
      " |      >>> output = m(input)\n",
      " |  \n",
      " |  .. _link:\n",
      " |      https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md\n",
      " |  \n",
      " |  Method resolution order:\n",
      " |      MaxPool2d\n",
      " |      _MaxPoolNd\n",
      " |      torch.nn.modules.module.Module\n",
      " |      builtins.object\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  forward(self, input: torch.Tensor) -> torch.Tensor\n",
      " |      Defines the computation performed at every call.\n",
      " |      \n",
      " |      Should be overridden by all subclasses.\n",
      " |      \n",
      " |      .. note::\n",
      " |          Although the recipe for forward pass needs to be defined within\n",
      " |          this function, one should call the :class:`Module` instance afterwards\n",
      " |          instead of this since the former takes care of running the\n",
      " |          registered hooks while the latter silently ignores them.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes defined here:\n",
      " |  \n",
      " |  __annotations__ = {'dilation': typing.Union[int, typing.Tuple[int, int...\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from _MaxPoolNd:\n",
      " |  \n",
      " |  __init__(self, kernel_size: Union[int, Tuple[int, ...]], stride: Union[int, Tuple[int, ...], NoneType] = None, padding: Union[int, Tuple[int, ...]] = 0, dilation: Union[int, Tuple[int, ...]] = 1, return_indices: bool = False, ceil_mode: bool = False) -> None\n",
      " |      Initializes internal Module state, shared by both nn.Module and ScriptModule.\n",
      " |  \n",
      " |  extra_repr(self) -> str\n",
      " |      Set the extra representation of the module\n",
      " |      \n",
      " |      To print customized extra information, you should re-implement\n",
      " |      this method in your own modules. Both single-line and multi-line\n",
      " |      strings are acceptable.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from _MaxPoolNd:\n",
      " |  \n",
      " |  __constants__ = ['kernel_size', 'stride', 'padding', 'dilation', 'retu...\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Methods inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  __call__ = _call_impl(self, *input, **kwargs)\n",
      " |  \n",
      " |  __delattr__(self, name)\n",
      " |      Implement delattr(self, name).\n",
      " |  \n",
      " |  __dir__(self)\n",
      " |      Default dir() implementation.\n",
      " |  \n",
      " |  __getattr__(self, name: str) -> Union[torch.Tensor, ForwardRef('Module')]\n",
      " |  \n",
      " |  __repr__(self)\n",
      " |      Return repr(self).\n",
      " |  \n",
      " |  __setattr__(self, name: str, value: Union[torch.Tensor, ForwardRef('Module')]) -> None\n",
      " |      Implement setattr(self, name, value).\n",
      " |  \n",
      " |  __setstate__(self, state)\n",
      " |  \n",
      " |  add_module(self, name: str, module: Union[ForwardRef('Module'), NoneType]) -> None\n",
      " |      Adds a child module to the current module.\n",
      " |      \n",
      " |      The module can be accessed as an attribute using the given name.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the child module. The child module can be\n",
      " |              accessed from this module using the given name\n",
      " |          module (Module): child module to be added to the module.\n",
      " |  \n",
      " |  apply(self: ~T, fn: Callable[[ForwardRef('Module')], NoneType]) -> ~T\n",
      " |      Applies ``fn`` recursively to every submodule (as returned by ``.children()``)\n",
      " |      as well as self. Typical use includes initializing the parameters of a model\n",
      " |      (see also :ref:`nn-init-doc`).\n",
      " |      \n",
      " |      Args:\n",
      " |          fn (:class:`Module` -> None): function to be applied to each submodule\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> @torch.no_grad()\n",
      " |          >>> def init_weights(m):\n",
      " |          >>>     print(m)\n",
      " |          >>>     if type(m) == nn.Linear:\n",
      " |          >>>         m.weight.fill_(1.0)\n",
      " |          >>>         print(m.weight)\n",
      " |          >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))\n",
      " |          >>> net.apply(init_weights)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 1.,  1.],\n",
      " |                  [ 1.,  1.]])\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 1.,  1.],\n",
      " |                  [ 1.,  1.]])\n",
      " |          Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |          Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |  \n",
      " |  bfloat16(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``bfloat16`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  buffers(self, recurse: bool = True) -> Iterator[torch.Tensor]\n",
      " |      Returns an iterator over module buffers.\n",
      " |      \n",
      " |      Args:\n",
      " |          recurse (bool): if True, then yields buffers of this module\n",
      " |              and all submodules. Otherwise, yields only buffers that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          torch.Tensor: module buffer\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for buf in model.buffers():\n",
      " |          >>>     print(type(buf), buf.size())\n",
      " |          <class 'torch.Tensor'> (20L,)\n",
      " |          <class 'torch.Tensor'> (20L, 1L, 5L, 5L)\n",
      " |  \n",
      " |  children(self) -> Iterator[ForwardRef('Module')]\n",
      " |      Returns an iterator over immediate children modules.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Module: a child module\n",
      " |  \n",
      " |  cpu(self: ~T) -> ~T\n",
      " |      Moves all model parameters and buffers to the CPU.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  cuda(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T\n",
      " |      Moves all model parameters and buffers to the GPU.\n",
      " |      \n",
      " |      This also makes associated parameters and buffers different objects. So\n",
      " |      it should be called before constructing optimizer if the module will\n",
      " |      live on GPU while being optimized.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (int, optional): if specified, all parameters will be\n",
      " |              copied to that device\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  double(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``double`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  eval(self: ~T) -> ~T\n",
      " |      Sets the module in evaluation mode.\n",
      " |      \n",
      " |      This has any effect only on certain modules. See documentations of\n",
      " |      particular modules for details of their behaviors in training/evaluation\n",
      " |      mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n",
      " |      etc.\n",
      " |      \n",
      " |      This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.\n",
      " |      \n",
      " |      See :ref:`locally-disable-grad-doc` for a comparison between\n",
      " |      `.eval()` and several similar mechanisms that may be confused with it.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  float(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``float`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  get_buffer(self, target: str) -> 'Tensor'\n",
      " |      Returns the buffer given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      See the docstring for ``get_submodule`` for a more detailed\n",
      " |      explanation of this method's functionality as well as how to\n",
      " |      correctly specify ``target``.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the buffer\n",
      " |              to look for. (See ``get_submodule`` for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.Tensor: The buffer referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not a\n",
      " |              buffer\n",
      " |  \n",
      " |  get_extra_state(self) -> Any\n",
      " |      Returns any extra state to include in the module's state_dict.\n",
      " |      Implement this and a corresponding :func:`set_extra_state` for your module\n",
      " |      if you need to store extra state. This function is called when building the\n",
      " |      module's `state_dict()`.\n",
      " |      \n",
      " |      Note that extra state should be pickleable to ensure working serialization\n",
      " |      of the state_dict. We only provide provide backwards compatibility guarantees\n",
      " |      for serializing Tensors; other objects may break backwards compatibility if\n",
      " |      their serialized pickled form changes.\n",
      " |      \n",
      " |      Returns:\n",
      " |          object: Any extra state to store in the module's state_dict\n",
      " |  \n",
      " |  get_parameter(self, target: str) -> 'Parameter'\n",
      " |      Returns the parameter given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      See the docstring for ``get_submodule`` for a more detailed\n",
      " |      explanation of this method's functionality as well as how to\n",
      " |      correctly specify ``target``.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the Parameter\n",
      " |              to look for. (See ``get_submodule`` for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.nn.Parameter: The Parameter referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not an\n",
      " |              ``nn.Parameter``\n",
      " |  \n",
      " |  get_submodule(self, target: str) -> 'Module'\n",
      " |      Returns the submodule given by ``target`` if it exists,\n",
      " |      otherwise throws an error.\n",
      " |      \n",
      " |      For example, let's say you have an ``nn.Module`` ``A`` that\n",
      " |      looks like this:\n",
      " |      \n",
      " |      .. code-block::text\n",
      " |      \n",
      " |          A(\n",
      " |              (net_b): Module(\n",
      " |                  (net_c): Module(\n",
      " |                      (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))\n",
      " |                  )\n",
      " |                  (linear): Linear(in_features=100, out_features=200, bias=True)\n",
      " |              )\n",
      " |          )\n",
      " |      \n",
      " |      (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested\n",
      " |      submodule ``net_b``, which itself has two submodules ``net_c``\n",
      " |      and ``linear``. ``net_c`` then has a submodule ``conv``.)\n",
      " |      \n",
      " |      To check whether or not we have the ``linear`` submodule, we\n",
      " |      would call ``get_submodule(\"net_b.linear\")``. To check whether\n",
      " |      we have the ``conv`` submodule, we would call\n",
      " |      ``get_submodule(\"net_b.net_c.conv\")``.\n",
      " |      \n",
      " |      The runtime of ``get_submodule`` is bounded by the degree\n",
      " |      of module nesting in ``target``. A query against\n",
      " |      ``named_modules`` achieves the same result, but it is O(N) in\n",
      " |      the number of transitive modules. So, for a simple check to see\n",
      " |      if some submodule exists, ``get_submodule`` should always be\n",
      " |      used.\n",
      " |      \n",
      " |      Args:\n",
      " |          target: The fully-qualified string name of the submodule\n",
      " |              to look for. (See above example for how to specify a\n",
      " |              fully-qualified string.)\n",
      " |      \n",
      " |      Returns:\n",
      " |          torch.nn.Module: The submodule referenced by ``target``\n",
      " |      \n",
      " |      Raises:\n",
      " |          AttributeError: If the target string references an invalid\n",
      " |              path or resolves to something that is not an\n",
      " |              ``nn.Module``\n",
      " |  \n",
      " |  half(self: ~T) -> ~T\n",
      " |      Casts all floating point parameters and buffers to ``half`` datatype.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  load_state_dict(self, state_dict: 'OrderedDict[str, Tensor]', strict: bool = True)\n",
      " |      Copies parameters and buffers from :attr:`state_dict` into\n",
      " |      this module and its descendants. If :attr:`strict` is ``True``, then\n",
      " |      the keys of :attr:`state_dict` must exactly match the keys returned\n",
      " |      by this module's :meth:`~torch.nn.Module.state_dict` function.\n",
      " |      \n",
      " |      Args:\n",
      " |          state_dict (dict): a dict containing parameters and\n",
      " |              persistent buffers.\n",
      " |          strict (bool, optional): whether to strictly enforce that the keys\n",
      " |              in :attr:`state_dict` match the keys returned by this module's\n",
      " |              :meth:`~torch.nn.Module.state_dict` function. Default: ``True``\n",
      " |      \n",
      " |      Returns:\n",
      " |          ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:\n",
      " |              * **missing_keys** is a list of str containing the missing keys\n",
      " |              * **unexpected_keys** is a list of str containing the unexpected keys\n",
      " |      \n",
      " |      Note:\n",
      " |          If a parameter or buffer is registered as ``None`` and its corresponding key\n",
      " |          exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a\n",
      " |          ``RuntimeError``.\n",
      " |  \n",
      " |  modules(self) -> Iterator[ForwardRef('Module')]\n",
      " |      Returns an iterator over all modules in the network.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Module: a module in the network\n",
      " |      \n",
      " |      Note:\n",
      " |          Duplicate modules are returned only once. In the following\n",
      " |          example, ``l`` will be returned only once.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> l = nn.Linear(2, 2)\n",
      " |          >>> net = nn.Sequential(l, l)\n",
      " |          >>> for idx, m in enumerate(net.modules()):\n",
      " |                  print(idx, '->', m)\n",
      " |      \n",
      " |          0 -> Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          )\n",
      " |          1 -> Linear(in_features=2, out_features=2, bias=True)\n",
      " |  \n",
      " |  named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, torch.Tensor]]\n",
      " |      Returns an iterator over module buffers, yielding both the\n",
      " |      name of the buffer as well as the buffer itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          prefix (str): prefix to prepend to all buffer names.\n",
      " |          recurse (bool): if True, then yields buffers of this module\n",
      " |              and all submodules. Otherwise, yields only buffers that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, torch.Tensor): Tuple containing the name and buffer\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, buf in self.named_buffers():\n",
      " |          >>>    if name in ['running_var']:\n",
      " |          >>>        print(buf.size())\n",
      " |  \n",
      " |  named_children(self) -> Iterator[Tuple[str, ForwardRef('Module')]]\n",
      " |      Returns an iterator over immediate children modules, yielding both\n",
      " |      the name of the module as well as the module itself.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Module): Tuple containing a name and child module\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, module in model.named_children():\n",
      " |          >>>     if name in ['conv4', 'conv5']:\n",
      " |          >>>         print(module)\n",
      " |  \n",
      " |  named_modules(self, memo: Union[Set[ForwardRef('Module')], NoneType] = None, prefix: str = '', remove_duplicate: bool = True)\n",
      " |      Returns an iterator over all modules in the network, yielding\n",
      " |      both the name of the module as well as the module itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          memo: a memo to store the set of modules already added to the result\n",
      " |          prefix: a prefix that will be added to the name of the module\n",
      " |          remove_duplicate: whether to remove the duplicated module instances in the result\n",
      " |          or not\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Module): Tuple of name and module\n",
      " |      \n",
      " |      Note:\n",
      " |          Duplicate modules are returned only once. In the following\n",
      " |          example, ``l`` will be returned only once.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> l = nn.Linear(2, 2)\n",
      " |          >>> net = nn.Sequential(l, l)\n",
      " |          >>> for idx, m in enumerate(net.named_modules()):\n",
      " |                  print(idx, '->', m)\n",
      " |      \n",
      " |          0 -> ('', Sequential(\n",
      " |            (0): Linear(in_features=2, out_features=2, bias=True)\n",
      " |            (1): Linear(in_features=2, out_features=2, bias=True)\n",
      " |          ))\n",
      " |          1 -> ('0', Linear(in_features=2, out_features=2, bias=True))\n",
      " |  \n",
      " |  named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, torch.nn.parameter.Parameter]]\n",
      " |      Returns an iterator over module parameters, yielding both the\n",
      " |      name of the parameter as well as the parameter itself.\n",
      " |      \n",
      " |      Args:\n",
      " |          prefix (str): prefix to prepend to all parameter names.\n",
      " |          recurse (bool): if True, then yields parameters of this module\n",
      " |              and all submodules. Otherwise, yields only parameters that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          (string, Parameter): Tuple containing the name and parameter\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for name, param in self.named_parameters():\n",
      " |          >>>    if name in ['bias']:\n",
      " |          >>>        print(param.size())\n",
      " |  \n",
      " |  parameters(self, recurse: bool = True) -> Iterator[torch.nn.parameter.Parameter]\n",
      " |      Returns an iterator over module parameters.\n",
      " |      \n",
      " |      This is typically passed to an optimizer.\n",
      " |      \n",
      " |      Args:\n",
      " |          recurse (bool): if True, then yields parameters of this module\n",
      " |              and all submodules. Otherwise, yields only parameters that\n",
      " |              are direct members of this module.\n",
      " |      \n",
      " |      Yields:\n",
      " |          Parameter: module parameter\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> for param in model.parameters():\n",
      " |          >>>     print(type(param), param.size())\n",
      " |          <class 'torch.Tensor'> (20L,)\n",
      " |          <class 'torch.Tensor'> (20L, 1L, 5L, 5L)\n",
      " |  \n",
      " |  register_backward_hook(self, hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a backward hook on the module.\n",
      " |      \n",
      " |      This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and\n",
      " |      the behavior of this function will change in future versions.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_buffer(self, name: str, tensor: Union[torch.Tensor, NoneType], persistent: bool = True) -> None\n",
      " |      Adds a buffer to the module.\n",
      " |      \n",
      " |      This is typically used to register a buffer that should not to be\n",
      " |      considered a model parameter. For example, BatchNorm's ``running_mean``\n",
      " |      is not a parameter, but is part of the module's state. Buffers, by\n",
      " |      default, are persistent and will be saved alongside parameters. This\n",
      " |      behavior can be changed by setting :attr:`persistent` to ``False``. The\n",
      " |      only difference between a persistent buffer and a non-persistent buffer\n",
      " |      is that the latter will not be a part of this module's\n",
      " |      :attr:`state_dict`.\n",
      " |      \n",
      " |      Buffers can be accessed as attributes using given names.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the buffer. The buffer can be accessed\n",
      " |              from this module using the given name\n",
      " |          tensor (Tensor or None): buffer to be registered. If ``None``, then operations\n",
      " |              that run on buffers, such as :attr:`cuda`, are ignored. If ``None``,\n",
      " |              the buffer is **not** included in the module's :attr:`state_dict`.\n",
      " |          persistent (bool): whether the buffer is part of this module's\n",
      " |              :attr:`state_dict`.\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> self.register_buffer('running_mean', torch.zeros(num_features))\n",
      " |  \n",
      " |  register_forward_hook(self, hook: Callable[..., NoneType]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a forward hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time after :func:`forward` has computed an output.\n",
      " |      It should have the following signature::\n",
      " |      \n",
      " |          hook(module, input, output) -> None or modified output\n",
      " |      \n",
      " |      The input contains only the positional arguments given to the module.\n",
      " |      Keyword arguments won't be passed to the hooks and only to the ``forward``.\n",
      " |      The hook can modify the output. It can modify the input inplace but\n",
      " |      it will not have effect on forward since this is called after\n",
      " |      :func:`forward` is called.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_forward_pre_hook(self, hook: Callable[..., NoneType]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a forward pre-hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time before :func:`forward` is invoked.\n",
      " |      It should have the following signature::\n",
      " |      \n",
      " |          hook(module, input) -> None or modified input\n",
      " |      \n",
      " |      The input contains only the positional arguments given to the module.\n",
      " |      Keyword arguments won't be passed to the hooks and only to the ``forward``.\n",
      " |      The hook can modify the input. User can either return a tuple or a\n",
      " |      single modified value in the hook. We will wrap the value into a tuple\n",
      " |      if a single value is returned(unless that value is already a tuple).\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_full_backward_hook(self, hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]) -> torch.utils.hooks.RemovableHandle\n",
      " |      Registers a backward hook on the module.\n",
      " |      \n",
      " |      The hook will be called every time the gradients with respect to module\n",
      " |      inputs are computed. The hook should have the following signature::\n",
      " |      \n",
      " |          hook(module, grad_input, grad_output) -> tuple(Tensor) or None\n",
      " |      \n",
      " |      The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients\n",
      " |      with respect to the inputs and outputs respectively. The hook should\n",
      " |      not modify its arguments, but it can optionally return a new gradient with\n",
      " |      respect to the input that will be used in place of :attr:`grad_input` in\n",
      " |      subsequent computations. :attr:`grad_input` will only correspond to the inputs given\n",
      " |      as positional arguments and all kwarg arguments are ignored. Entries\n",
      " |      in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor\n",
      " |      arguments.\n",
      " |      \n",
      " |      For technical reasons, when this hook is applied to a Module, its forward function will\n",
      " |      receive a view of each Tensor passed to the Module. Similarly the caller will receive a view\n",
      " |      of each Tensor returned by the Module's forward function.\n",
      " |      \n",
      " |      .. warning ::\n",
      " |          Modifying inputs or outputs inplace is not allowed when using backward hooks and\n",
      " |          will raise an error.\n",
      " |      \n",
      " |      Returns:\n",
      " |          :class:`torch.utils.hooks.RemovableHandle`:\n",
      " |              a handle that can be used to remove the added hook by calling\n",
      " |              ``handle.remove()``\n",
      " |  \n",
      " |  register_parameter(self, name: str, param: Union[torch.nn.parameter.Parameter, NoneType]) -> None\n",
      " |      Adds a parameter to the module.\n",
      " |      \n",
      " |      The parameter can be accessed as an attribute using given name.\n",
      " |      \n",
      " |      Args:\n",
      " |          name (string): name of the parameter. The parameter can be accessed\n",
      " |              from this module using the given name\n",
      " |          param (Parameter or None): parameter to be added to the module. If\n",
      " |              ``None``, then operations that run on parameters, such as :attr:`cuda`,\n",
      " |              are ignored. If ``None``, the parameter is **not** included in the\n",
      " |              module's :attr:`state_dict`.\n",
      " |  \n",
      " |  requires_grad_(self: ~T, requires_grad: bool = True) -> ~T\n",
      " |      Change if autograd should record operations on parameters in this\n",
      " |      module.\n",
      " |      \n",
      " |      This method sets the parameters' :attr:`requires_grad` attributes\n",
      " |      in-place.\n",
      " |      \n",
      " |      This method is helpful for freezing part of the module for finetuning\n",
      " |      or training parts of a model individually (e.g., GAN training).\n",
      " |      \n",
      " |      See :ref:`locally-disable-grad-doc` for a comparison between\n",
      " |      `.requires_grad_()` and several similar mechanisms that may be confused with it.\n",
      " |      \n",
      " |      Args:\n",
      " |          requires_grad (bool): whether autograd should record operations on\n",
      " |                                parameters in this module. Default: ``True``.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  set_extra_state(self, state: Any)\n",
      " |      This function is called from :func:`load_state_dict` to handle any extra state\n",
      " |      found within the `state_dict`. Implement this function and a corresponding\n",
      " |      :func:`get_extra_state` for your module if you need to store extra state within its\n",
      " |      `state_dict`.\n",
      " |      \n",
      " |      Args:\n",
      " |          state (dict): Extra state from the `state_dict`\n",
      " |  \n",
      " |  share_memory(self: ~T) -> ~T\n",
      " |      See :meth:`torch.Tensor.share_memory_`\n",
      " |  \n",
      " |  state_dict(self, destination=None, prefix='', keep_vars=False)\n",
      " |      Returns a dictionary containing a whole state of the module.\n",
      " |      \n",
      " |      Both parameters and persistent buffers (e.g. running averages) are\n",
      " |      included. Keys are corresponding parameter and buffer names.\n",
      " |      Parameters and buffers set to ``None`` are not included.\n",
      " |      \n",
      " |      Returns:\n",
      " |          dict:\n",
      " |              a dictionary containing a whole state of the module\n",
      " |      \n",
      " |      Example::\n",
      " |      \n",
      " |          >>> module.state_dict().keys()\n",
      " |          ['bias', 'weight']\n",
      " |  \n",
      " |  to(self, *args, **kwargs)\n",
      " |      Moves and/or casts the parameters and buffers.\n",
      " |      \n",
      " |      This can be called as\n",
      " |      \n",
      " |      .. function:: to(device=None, dtype=None, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(dtype, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(tensor, non_blocking=False)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      .. function:: to(memory_format=torch.channels_last)\n",
      " |         :noindex:\n",
      " |      \n",
      " |      Its signature is similar to :meth:`torch.Tensor.to`, but only accepts\n",
      " |      floating point or complex :attr:`dtype`\\ s. In addition, this method will\n",
      " |      only cast the floating point or complex parameters and buffers to :attr:`dtype`\n",
      " |      (if given). The integral parameters and buffers will be moved\n",
      " |      :attr:`device`, if that is given, but with dtypes unchanged. When\n",
      " |      :attr:`non_blocking` is set, it tries to convert/move asynchronously\n",
      " |      with respect to the host if possible, e.g., moving CPU Tensors with\n",
      " |      pinned memory to CUDA devices.\n",
      " |      \n",
      " |      See below for examples.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (:class:`torch.device`): the desired device of the parameters\n",
      " |              and buffers in this module\n",
      " |          dtype (:class:`torch.dtype`): the desired floating point or complex dtype of\n",
      " |              the parameters and buffers in this module\n",
      " |          tensor (torch.Tensor): Tensor whose dtype and device are the desired\n",
      " |              dtype and device for all parameters and buffers in this module\n",
      " |          memory_format (:class:`torch.memory_format`): the desired memory\n",
      " |              format for 4D parameters and buffers in this module (keyword\n",
      " |              only argument)\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |      \n",
      " |      Examples::\n",
      " |      \n",
      " |          >>> linear = nn.Linear(2, 2)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1913, -0.3420],\n",
      " |                  [-0.5113, -0.2325]])\n",
      " |          >>> linear.to(torch.double)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1913, -0.3420],\n",
      " |                  [-0.5113, -0.2325]], dtype=torch.float64)\n",
      " |          >>> gpu1 = torch.device(\"cuda:1\")\n",
      " |          >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1914, -0.3420],\n",
      " |                  [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')\n",
      " |          >>> cpu = torch.device(\"cpu\")\n",
      " |          >>> linear.to(cpu)\n",
      " |          Linear(in_features=2, out_features=2, bias=True)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.1914, -0.3420],\n",
      " |                  [-0.5112, -0.2324]], dtype=torch.float16)\n",
      " |      \n",
      " |          >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)\n",
      " |          >>> linear.weight\n",
      " |          Parameter containing:\n",
      " |          tensor([[ 0.3741+0.j,  0.2382+0.j],\n",
      " |                  [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)\n",
      " |          >>> linear(torch.ones(3, 2, dtype=torch.cdouble))\n",
      " |          tensor([[0.6122+0.j, 0.1150+0.j],\n",
      " |                  [0.6122+0.j, 0.1150+0.j],\n",
      " |                  [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)\n",
      " |  \n",
      " |  to_empty(self: ~T, *, device: Union[str, torch.device]) -> ~T\n",
      " |      Moves the parameters and buffers to the specified device without copying storage.\n",
      " |      \n",
      " |      Args:\n",
      " |          device (:class:`torch.device`): The desired device of the parameters\n",
      " |              and buffers in this module.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  train(self: ~T, mode: bool = True) -> ~T\n",
      " |      Sets the module in training mode.\n",
      " |      \n",
      " |      This has any effect only on certain modules. See documentations of\n",
      " |      particular modules for details of their behaviors in training/evaluation\n",
      " |      mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n",
      " |      etc.\n",
      " |      \n",
      " |      Args:\n",
      " |          mode (bool): whether to set training mode (``True``) or evaluation\n",
      " |                       mode (``False``). Default: ``True``.\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  type(self: ~T, dst_type: Union[torch.dtype, str]) -> ~T\n",
      " |      Casts all parameters and buffers to :attr:`dst_type`.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Args:\n",
      " |          dst_type (type or string): the desired type\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  xpu(self: ~T, device: Union[int, torch.device, NoneType] = None) -> ~T\n",
      " |      Moves all model parameters and buffers to the XPU.\n",
      " |      \n",
      " |      This also makes associated parameters and buffers different objects. So\n",
      " |      it should be called before constructing optimizer if the module will\n",
      " |      live on XPU while being optimized.\n",
      " |      \n",
      " |      .. note::\n",
      " |          This method modifies the module in-place.\n",
      " |      \n",
      " |      Arguments:\n",
      " |          device (int, optional): if specified, all parameters will be\n",
      " |              copied to that device\n",
      " |      \n",
      " |      Returns:\n",
      " |          Module: self\n",
      " |  \n",
      " |  zero_grad(self, set_to_none: bool = False) -> None\n",
      " |      Sets gradients of all model parameters to zero. See similar function\n",
      " |      under :class:`torch.optim.Optimizer` for more context.\n",
      " |      \n",
      " |      Args:\n",
      " |          set_to_none (bool): instead of setting to zero, set the grads to None.\n",
      " |              See :meth:`torch.optim.Optimizer.zero_grad` for details.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes inherited from torch.nn.modules.module.Module:\n",
      " |  \n",
      " |  T_destination = ~T_destination\n",
      " |  \n",
      " |  dump_patches = False\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(nn.MaxPool2d)"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABKAAAAAwCAYAAAAmcALsAAAgAElEQVR4nO19v2vjyvf290+Zf2JKk+JjUqybiLeJSCHYwpDCEAjmQtDCRQQuJkUwC0YsBLOwmIULDgsOBFQsqFmUYnGKi1MsCiyoCKhYULGg5nmLGdn6rZF/xZtM8cC9G1kazZxz5pxH55z5vzAMISEhISEhISEhISEhISEhISEhsSn833MPQEJCQkJCQkJCQkJCQkJCQkLiZUMSUBISEhISEhISEhISEhISEhISG4UkoCQkJCQkJCQkJCQkJHYMsysFhBAOiv735x+ThISExCrYfQLqhwVzYML6sQNjke8ksRH4cD6bMD878J99LJuHZ49gDkyYgxEc/5XP0bPbghc6rxJ/wHrv0lheC1xYAxPmrbsDY1kf3FsT5sCC+1xj8B2MBiZGd/6zz8Uuwv9vguHAlH7vKvg5RpsQEKLDEvabXhJe6H7x7D7gLuCZ1/aFr4F/N6oZb20HpQSU/98E/VMFDUJAaBPa2RC2V/MhdybUozZG/6X/FsB5r0I9HWedhp8T6EcqjFsf4V0fhBD077Y7MdMrFXSvByfYwP2f6Z0ktrzOwvAwPiYgx2N4OzAnG53vD+xLXuNAhXo0xPS1z1HaFjxZ0Pco2v9uK0Dc0Xn97cK67ELdpyCEovlWx/BbjeDuyYJxpKK7tXn8U7BL673hsdwPodIGet+CHZj3XYGDPiEgl84OjGV9cC4JCOnDea615+RA+9p79rkoxTPohH+rgxICuq9CPTJg1Y0hImx9b9wxfO+DEgLSzomZXgV2ae9aI57dB1wO642dnnltX3hM7l23QUgb45/PP5Y4Cgmo4K4PhRAop0NMbBu2NUbvLQWhHUzqvES0MWeUyUGfktxJCb4aIKQF834DgvF9CPVIxbAihXX6sQ312MR0VeXKe94LF/aNwWNBpXG7jJPnwfpbhfq3lTBwa1vnlbCjG6ugroiD6Tw9txHUXqsdnaNVkeN8GEcq9JttBTK7OK8eJicUhKroDsawrDH6xw0QoqD/XSxwcj9rrFzhjVmD5Hxp2IYe5dvV5xlLCvdDtI/aMAVlZmMQ3bdW2t9E7/d6CKitrv0fREBtVye4jmuj1UmTre+NuwX33zYIIaDvp88+ls1D+oC7LufrjZ0kAbVJ/GEElIuRRkBOJ8l0uF8WdELQ+lDHALoYHRGQMyt5r3sTLUJBKYF+m/yyPX1PQUiPORPrFoxtC1re8164sG8MKzl5u7x57ejY1i2nwuuXNx87Oke7Nse1sYPzem+iRQh0KxYkBTZ6lIBciATObM9pnXSgRR8ynvuddmZt109Aid3vFel0GqJ2b90kRu79XgkBtatr/OrwMuWtCt6DW/GRLUTwOIP3W/SePqwz1gPK+Lo8ebj+cW0Kr2i/eHYfcBcgCahN4o8joKzcenYuJP/YtR7iXFIQ2k98iXY/qSBv+uifE5BERoSLcTtGWMUEw/9hYcTryIc3s9xaUe9uPK81Nz9bcKOaR16jb56xL+PaWXnPgPyeAj5cK+pfY2JkzeAXGeqy563wTiPLFauR/e3BuR7ysQ4xvstzjILEvYfXTmrjidXl+i6sz3xer8ZwojRqz8H4Kme+w5DX1bK608Q72MmxFNWnLtaA9634p4MWIWid9LLXx8eXel92nx46bwjImw56Mdku6h0R/Iy9V/x9E+/GaoYr1zCxFnlrKG5848/KzHeEp1lsLkawfuSXLmXWPvqSUVNX2Dv6mCV0I/mO7m16/fL7ZRSvVWyOEvM5KigLjutqkfynZdCCm/rd5L/8uauUD9Hr0htfppdIrGdLQgcnmOWsfVw+RpYL/4ndr7i2vUD2ymTWszHMXT8+1i+zmD0vW4eYfeHjND878H8H8H0fwe+ccZ5Mqh2UexMt0oJ5zz6k0H+KMu5E7Hn5NdW2K/Wey9jRPBtXInPr1KOEvUk9s8yu7oJOZ/s6i
OmSqC1Yy74VH1fZdYI2vfp+MUJAwJ7Ukbn6drTK/xC/LkNALbn2ERbP43IW82Vy372IgCrxS4L/JmyMD+k5Z7ZwGPeTStdh8W7BA++1VNTjq6DXSaEvUIaKPZ/Jgg6NEBBNZ9eV9h6rsMfpvZG/Sy7SvWRE7Ova4GFy0oD63ikke4K7PpRa1STTRcXI4y6Na/1Y236xUdv1UnzAALMvJsxPKX3xHYwGQ9gpOZjdmjA/2vDCbOwU3w/L4r0wDJP+5dUYjucW+KHlNsazhzAHFmaJ+7P5Gt8niVo2vglmebZtXTxDHF5q3X+KEcdC9y6T9cEI1kNSZuMEVK21yYtXBeP63HlIvU+9JuRLZUCxGnBCVIzmCsAMCb102N9ozHHw2TPUT25CMNonHdA9Bd1zA92DBgghUN5PE8TV5LQBQhrQznowL3RoewSEdpjB5unoKv9tVQ+a7Bc1F+MTynth9dA709CkBPSwD+dX3qSXPG/Jd+qdaWgQgsbppJwMeLSgHxCQPQXdcxMG7+OlvIunsy7urZwaMM87aFICcqDDmm9w3OC/UaHuseuMU5VdRzsYXfeh0CbUUx36Me8VdtBfpGTy91QOFNB9Fd0zHe3oPS8Xm2ARO7tYgymGRyrUw2ZuL4Hgrg+VEtD9DoyBCaPDroueMb1SoR5F425CjZUl5H05db902TwfdGEMDHT2KQhRoMedJ9E1fJygu0dA9jToF+Z8DelJvI5fhIAKMP2ggfJ+OL0LHdo+BaEahjFDu5gLDfpFD7rWACEU2oe4XAVwLpXY2neh7BGQvS4mS+hK+MtB/zCpGw1CQN+amHLdmF6l1y+/1KR4rfgcHXXQOWBzYHAdzJQFx8bTOY/kgZY6XkwGW1AOKBoHbehni/5Dneuk0ywkHzXlaO58ZAIZHjAed9ChkQ7m6FoYwr3u8PnVoJ8z+WgcqmiVftnJkb1KmZ3CfJOTIRtlLkUZrZXrENkXBQpfb+WvAoKpxt7jXFKQI1by4d90C5q2itjz6muqbdca7GjGxnFZoir6d/lOzep6xHo0UkLR7BgwY/IbPbPMru6CTme/aorpkqgtWHXfWkB0fyuz6aL3E7cndWWunh0V8T/Er8vs40uu/WJv5Hb7vAtlj0I9VMq/IOcQUFV+Sfg0QZcQtAZJm8b85UXmZvU68Hc7UKAQ5vcpg4KMo8y8VPgCRbolsOd7twbUIz7HewrUIxXqVZH9FrDH6TnmLQIS4DIf/1Ax34c17jeJvF8E34V1paN9oEA5UqEcaDCuZ5VZROFv9j5xPzcuF0oUl1TdJ8Ijb0D+vx5G1wa0Q+af0f0OzDq9Edc5rkcLhqaguU/ROB7O1z0MQ/h2H+0jndvwANOBCkoa0K3qsa5jv9is7XpZPiB776SPxNrgEGif4+/D/D7KsxnTNpfdR0XnREk8nxCKzpeYX/DLQf+AzG2OcaqgQRto0JQfKmBjwrs+aDrL/ccIKkl/eOQZhEUx1rp4hrguxdad3auB7pcyAl7s3nm2oRP3D95mfSPhtRGJVwXjehHbW01ABT5834f3YKH/loIcFBAuAsZzHphwksn4GnBliwnQ95RA8ZelJ2O4868hAZyLFggxYHPlC+weKGklmyv+smHQhcLkKnsB0soVkWjD+NeqhyG0jJLmC3ZeCV7VO7E+Jm2MMoJNM2WLC/iYnJLMOgXfemgRip7N5sf9pCWCCbYu3DDMlZQb/NQYwh8j9lUr9TWGPSO2zvw9Wxd2LFCNHJ7FGosFcWHBV0ZeLnqcbMzI3i9+z3ySJ/vllL1bUpF8PubY/YTWMID9DwV500uuxVcDlNCYTAgQULxkNe3EJ/orRPJ+kpqLfzugpLVY67wSJ99iv40bbSFdCWCf06yR5EaxFZ/HdZTgEQX9uMPFbcucsI7NuR3bUNnGrBSWYzEZJGh/Ts3vCQUhXUyeaspHTTmqdD7SGxzXtXk6Pg9m6FmcFPLnwZQ4ASUms+4nNTkv83+LHBmRdVisa
c8qk4koONFhPRVdw8FL9eY2mc9L9yaVvSJgz0WuqUVALWtH+Tsl7Sifk4OyHlcr6BEfW7J348LW1i+L2L5OF5IQFbokagtW27dykHedqE0XvZ+oPVlS5kTnTsz/EL9OmICqem++NybIvd/R+OsQUCJ+SQD7nKT61KX+TWgdFu82KsjWKNQJUV8ggRp7vmAJnpB/XalHkW+pLT52R3YsLo88AKbptiApBA8jdPcIGqdjuEGIMJhieNwAIR1MRBqp55A9S5FP8/khLGD8FGVl8PYmtDePGYSwjnEFU/QPNJj3wVznF4HsYr9j9/NhnVE2fuFSzBX2iw3brhfnA3LCpvctrt+ErVfiw+QY7ZgPmE9AsfdNzPsxmX8QDMMQ00Er6wtx25HwQ0VsTGDDSPkpzA8lycSW0EGvzHaslWfgH2jfWUkC7JyW6qowh5EAJ9aOhokssNlHLWEHxdZGMF4VjOtFbG8lARUNnBAC8j/BrwYZ8AXh/TsYuxotKjOikfFgjfYWCx697EI54uOKKTEnypIbZo4RW5KA8r50shtkGCLwffhlhFwJAVX+TpxtzjgBvESxqJFz4SY9w+hYhfZxiiidN8/BYBkDUbZaUXDBDWJ6DJxYnD+7aK6jL388m2FVRz53DTLPFiOgpu8LjAQfczozr0ouA9+H76eChMx7VAdxURlruseFb/XY1+2nSEZjzlda/6L14qepJJzOvLGK6Io3QaeAhGUbTUyX10FAZU6ASTm4XAaz42Eb0CKoLV+3OVJZPaLyUVeOKp2PdO+j1HWRfcqsfeUaZudaSGb55rIgdrgzHMmY0DoUrWkcEfnEnN1yuYnsV3wd+QadaoIrYs+FrqlDQC1pR1nwkaPX33r5a74OPcotg8zLGF0DAbUhnS4kIap0SdAWbIOAErbpgvcTnYNlZU5s7kT9D9HrahBQFe/N+o8aGbtd2UMjLwNKwC+Jgo65084Dqkimxdah4N1EdELUF4ijzp4vSEAJ+dcVehQFSvGPdNMPrdx9OBNnpBEFxHHCIjqFLpV1UooY2eMtST4x+abzADLTrmSZ3i4rjsu/6XK95B+8iYJhdNI5t9cJYpWTQoW2OoPl94vN2q6X6APynpnz7HIHfUrR/zSEGpsPNq8LncknoLIlosnreClpZt9KrbewjeFk2ZxMZvdpfxrBiJMm9yZaZb7SWnkG/o4JAqrot1V/r/KvPExOsgRU2n8TWxtB318wrhexvcIZUL43w+RCBU1/jRICFxLOtDmXNNGUfPqhxSeYBwvxPh8FL5trLHjtYu+sPU/JbabT+pYkoNLpgKObKVyRTLAaTcgT78QXPeqXE4ee+Qpdfe9SgYojka1WEThlnAr275WCmgpW1+HIs7pdA90oFZunBNYjoMqUvbg3WaVceg7GVz3ox8k0cXECSizIK2vE6lzEHYJkWcPw2sbsKUenRWSp7JpvvSQjvpEm5ClZ5ONhfavi4D0FChzhQucjERCIyscKclTkfKTHnbqucO2XIKDEZDZ1UAUfz/xrnNA6VMs1ywIV3XO4M5wqDWQfPFIOqYg9F7hmLQRUhR1l99Kgp/uc8B4/9da2zlh4L4zz7nw/VfY2QEBtSKcLSYgKXRKzBdshoMRtutj96tmT+jInNHei/oewn1KDgCp972JZXoaACkMBvyTK1ojK8O76oBlirWodajT6zsyLoC9Qeo8Y0nu+6NhE7HGFPHQyJ+xGpc+sj1Bi/s7SGfJxRKRKNnM2/LVEA/DfLkacKCr+WFAGTjanxxsRPWVE2qbG9cuHH4SLLN7YnhuVbyWzNjyMj2ll3JW8frn9YqO260X6gDwGjzJh7vqglJWQm28WOuBckETcXkhAle2HhTqcmq8aNiZBjP0co03aGP9kXEJkV91/2+V75Zp5hkTp5dUY9kMJ8RTWu3cayRL9ESb3bubDobCvEobVvr9QXC9me+v1gIqCDpFGsCkwIelg4jGhTgjgXR+U6LB8xhwmWHJRwYhSu
w67MK8t2LYN256gd7QmAioM5w3R5v07CIX6d0Vz5hUJqKhfTgZFx16vSkAJOWSrElDJhsKrOvKsFKOB9vkIE9tma/+xu2YCSsxAZjKgeMmk+peJscXH9qUHddsEVOZvrLGredbmQSXJ1PGvTEBVbqxFWD1YZX2rclDQg6I4wGD3Z0cfi8rHCnK0AwSUmMxGc8ZS073rdvJrh9A6CH7dKc2QiiFyhqN+I6m+IOleK0L2vOKa7RFQDSh583ikYvh9A3oUZZ7ttWF8nvD91Mbwr+cjoOrq9NoJqIQt2AECqurkt5UJqPoyJzR3ov6HsJ+ymwSUmF/CP8jy4Mi5pIlsTbF1WIWACiHkC1Teo+a856HKHhfKA49LMu1B+HryPkJZ5PVjWzxnXadG+d/6UKiG9luaKoERRDSeo2QW75zoKSyV3PC4wugDUZKom35ogZDUaX2BDaMWUbYqAbUh2/UifcCQx+Ds3aeD1lymph9anHTKxu3PRkDlzhcjN/2b7tx++rc6t6uMjCouYyt+3tI8Q8ibzw8W/ZHIXjvRrzeDGvfO4GkG67OBbtQDj6owYn3GRH0VId9fKK4Xs735BFTgYWrbmOZ0bXcuSeZEOyHwOtP+pxHU9NdozjD3/k3WmNYRDGb0dFh5G9C6CKgEAnhWDwohUK5m9QRb5J3SzdhFwdNF06mECZTdm6cZs7FtioDiKYr89ys58tG7fEytwVIleFE97Sgn4E2OWUwu82qB896jKojj46oIxJkO5G3yPJAvMWL+/Yh9QXwX64sgoitc3vKOBmYpwbF13QYBVTKeMhQ6HzwVmI1ZVD5WkKMlnY9k/6UyPaiaa1GZDWP9lWaYnKYcYaF1qJL7AN53G/Z3T8jBZnOQ8+VzYMJ4m1/CmnhWpT3PXrMNAqpYr6uwvB7l9mTJvJfIGj6fTq+dgErYgu0QUKvY9FUIqGVlTmjuRP0PYT9lXQRUXk+mivcqmmthvyRcnNr5ndneeNmJ2DqsSkAlkesLhOnxCu75dcaWQI49zpXnnL5Pc5Ttw9VzVJoxIQhG8kTlbcUNwEvvwfs/RcR39G4sS4v3df0dFJdMbmhci55Mcb+DZ0HkNbWu6LmVe+8l9ouN2q4X6QOG8xjc+OrAfJPuiafDuh+jndKxpQioVBZx4XrXsjFRGx8H1lns3r4FnbRg3jvoxfofl+n82niGNJ6mGEV9TAtOtVv63pm19GD9oyBeFivmqwj6/kJxvZjtLSCgmJBkvhivkAEVDa6x18gZFE+Xe9PKKpCgYOQTYzMM15IBFdVZpsc9w/D/VWyuyxJQ0VxrOQv45JUYcie/b0KU6v1hikWj8vQGG8C5jNdtroeASvcXSNdUsy856frcqBFehSPP/y3RyT8MEVj6EgRU1Fsi29g2uOsnmriLrWHyi/kcD8OaGVDRfbNOFvsCxXWGNyrPNKjncxQ5tvl9LgJY75bRFd6rLONceMkG6UXrl4sVglUu5zSnN0rgFRMZ0SkR6YCbyUMs3VdQPpaWoyWdj2jt0xt2Vg+q5lpUZsOFHdE0aOkNXmgdlthcq+Sw6Gswnx82RhF7LmbzxWzXanaU9YnJrm0YBvC8ugSf2FiYPqSb7XIbsW0CakmdXo2AqrYFK+1beci7TtCmC99PcA6WlTmxuRP1P0SvWxcBFY0zu8/OPqr1CChhvyREZLtamoZW6tli67A8ASXsC4TZ8Qrt+UJjE/Sv8/ps5fR9ioP5R/nklFd0oEWUSZv7/j6cz+Pi7LC4zH/L661Ul+xZNIROyECUFcVJMvezlhOvbXJcsbVN7Ad5/+ZjclpBAOTIxLL7xWZt10v0AdkaWWcE9EBBK2EPeObTcTujn0sRUHOfMa3rnLhMfQgVszGcvDlQoCT2Y/ZO6nE79U45WCfPwD+O5K9D8R4izGHEwcnRDKH3MIQSex+xtRH0/QXjehHbW1CCF2D6njFo+vUUHu8BZb3XMj2gvBsd6rEpcDLewpDmHaU9b3aezvIQF
Az/VgclFNp7CzPPh3s/Yaf2pTcSPkntj/y9CoQy7xQ8dn8bru/D9104n/R85zBnURLPEy3f+s6Pcjwdwnlkvbhc20R7j0AtPIKZMcSUNNC9cthYHx2M3inJo0rvTfZ16d2I39uFc9VFI7GZr4eAIrSD4Z3LTlO8H0M/IMlGjtHpDSf8Pb0ZrEGbHTMa38j4V8XWuQXX93hzyinMAwJyoGN058L3ZrA/6+w44sQcc0b2jQHr0YfH+xxkv+in7uf7cO+G6O7lH0VZvob89A+qoW/N4Pkupjd9aJSNrQ4BxU4dIaCHPUwePPi+h5nF7rU4JjT9PB/ewwS9w9TplfO1H2Pq+Yt7EQIlrpuCupLWvcJ+ccIEVN5aicsik38K9WLCxuN7mF4bUCnN9nRI25+DaH59uDabX5og3AXlY1k5Wtb5CD1MTuJrv5CPegSUqMxyG8XLAPKyi6rXoToDavqxi+5Vsa2bj4M7ncVOLv+6wx0aEXsuZPOFbNeKdnS+tip6NzO+H08x/lsFpcmTCNemR6n9wXuw2R6SE1zk2dVd0OnVCCgBW7DSvpWD3OsEbbrw/erak3oyJzx3Qv6H+HXrIqCifXZht104c3+iTgmeqF/CMD+5KaMLIuuwQgaUqC+QgvCeX+MUvEpbmyH5JqxR+Ns+rKjEcY4pvCC2nnvduQ/qP9owjxsgh2ZBM/Ho1LbU6aQ/bZjHTWgfppWZPPkkT4Q6ZE/U/yn1YT5qiH7hIPzloHcQ8+23Mi4+T+9oIshkZacEhHTnHy/c6w4aqaQFZifKMpJX2S82bLtenA+40EFCSCZTjTX9zsbtyxFQi7h2btOj+DRl/8RtTDiPVdJZi4wEzL5TpV0seCcxnmGx7uN7Jkfeg4W+lvcxJc8GVnAYebaKaujbkX1zMDprJU6NFiUHhXx/0bhewPaW9IDyMb3q8FpsBrrfgfkt6ezNrorSX4sUvKARHS/Ry5BTws3BAkyvIuePgNAmOlc2xu8ICOklvtiypovlDkVeCZ57Y0DbW8wHoSqM61mFsc55Xo2GZ/5/YxiHNPbMJjpXVRtggNl1cqwNzcicYBg8pO69p6XeZ00ZUB+Tz6GHBqzUWNxbA2pM1hrHQ0w+pjepANOByje42Pw9Won7NzQDk1vO2saO5wy+m4tnJOrFUxvhrxnbrKJ5IQ1o52PMBPojZWuGo6N7Fzo0tMeseeSFUzHPKTw5MDvNxbhyZcGHM4jpAaFodkw4qU3X/z5EZz+29qSB9iDdB0BMV8IwhP+NEaNltkKcgMpbq3qy6NnJ8ZA9DcZNcbZAtG7mv8nf5fbCEJGPZeVoaeejQNYGdTOgRGWWI8pOKWruXroOVXIf9fZInfCRY+/Ykb1lDm0U5MWOghew50LXVNquVQmoEOFvD3ZCr/Nt+jr1KPleDWjnE1hXajYwy7Gru6DTq5bgidiClfatHDnOv07Mpovdr4Y9WULm6sxdtf8hft3aCKgwzPUnzIslmpAL+iVhGC6OrM7bGyvXYbUSPDFfIAuhPb/G2CptbUEpSD5ia+Wn92GKZmeIaUEpzGLOu1D2WQ8T5UBB+2wI60fJx+Y5phge6RW22cXkTMPwvuJevgWdEjQy5ZAurHMNjT0F6mGnvLfMJsYV4dcM43MNzb0m1EMFSmeI6Q8H5qmCBm1CPVByfSN2Elb5fr3SfrFh2/WyfMDk/TIJFTyzKv37ZQmoMMzzLSyML7JrK2Zj+BzRHP6AfySqjDnWzTP4UwzjsRohaBxX7dmiHEYKv11MzrWErNNDA+OHoOQ9CtZGxPevEddX2V6hJuSlR7LuGqLjB0XeSeQUuzXNxyrPY4JRcYTjKs/9taH1TR85XDlvbO2KMm2qxiv0roLykbj2t8C1Quu3pjmuOs6z9lyW36uO7K7VVtRZqxXHk8k+rCNLVfKxTjmqK2uCJceV9ynC0wTdxKlHW5CLNUNkbGuzXSuDP6fOXrKSHtWU8Y2OZTuyVN8WrLZviV+3pIytv
PeIy9xSdnTledkM4mOv7AEleJ9C3JtokbJsxnrrsBRW8THXuC4b0+/f27LREuXg1TAipfcr7xcbtl0v3QfcsByIzsnO+Y81/Z1aNmdpmV/j/lC219aO64vnoeYpeBISNbCzhk9CIolVAozdQADnaggn8VXXh3VOk6fTrRnuJ20tzVolJHYFf74tkHO3PFxMBmO48aDo9wzDon6cawHP4IwdZS8h8WLBTy2rfajEhvHn267n8QElXhnWGNdLAkpic5AElMQfgj/f+XBZuSRtonNuwhz0oGsNENJA90vNkzQr4cP5bKJ3prFeLF+qSyolJP4U/Pm2QM7d0vjloH9IQfY06BcmzIHBytOomu07sjJcWAMTRqfJ+pp8362AXEJiI/jtY3ZX1bpk+/jjbddWfUCJVwtJQEn8EfhhwRyYsAT6g0lIPCf8uxHMwSj19ejPg3c3xvC8C/WoDf1iBPvnJoIaD9bfKtSjLvo3u+dISkisgpdiC+TcLQsfrjVC76wN9agL42qCaWW/rWUwxfBIhXqsY5jb10RCQmJbeBm2a1s+oMSrxRrjeklASUhISEhISEhISEhISEhISEhsFJKAkpCQkJCQkJCQkJCQkJCQkJDYKCQBJSEhISEhISEhISEhISEhISGxUUgCSkJCQkJCQkJCQkJCQkJCQkJio5AElISEhISEhISEhISEhISEhITERiEJKAkJCQkJCQkJCQkJCQkJCQmJjUISUBISErlwbw10PjgIdmAsm0JwZ6JzbsHdgbFI7BB+TTE81TH5uQNj2Rg8TM66GN7LY5olJCQkJCQkJCS2A0lASUhIZOBed0AP+nB+Pf9YNosAzqUCejKWJJQEwy8H/QOKzrX7/GPZNB7H6FAF/TtJQklISEhISEhISGweO0lA+f9NMByYMAcmrB/PPx6JHPywXuD6+HA+mzA/O/DXct3uoJZO/RhBIwrM++cf93YwhXlAoHyY7sBYojV4ifrlwhqYMG+3TNXa7QgAABPgSURBVOz89mB/ZrJfrbMB7HMKemb9Mbq9KvxbHZTqsJ6efywSEhISEhISEhIvGztHQPm3OighoPsq1CMDlvf8Y3pVeLKg71G0/60IEu/6IISgf7cDY14bPIyPCcjxGF6t63xY7xqg7d3MoqmnU+zd6Ln9okvv0gi+GqBEw2hXCJ8/Sb9EbUbooE8IyKWzxfExcpGQBpQjFepVOckY3PXR2iU52ApcjDQC+s/r0nkJCQkJCQkJCYntY8cIKB7Ya6OdDORfBZ4sGEcq9Btv8W/fh1CPVAy/x677kwJkYaxAQJ2rUM8mFb97xncS1CkWgLdeUfZThCnMNzsUhD+3fuXpfBHybEYunoGAuuuDEgrjq0iJGdeV08mryX6K4N90QV4d8SYhISEhISEhIbFt7BgB9RxfyKvhPbiVQWnwOIP3+/nHuhHkBcPPHSBvBMsSULuMOjoVwP6HgtA+ps8+7u1j+qEFQnuwg+cfy7Pr10aev3377l23QUgbY5Fm4j9G0AhB50sVkfYC4VvQCYH2+RX0vZKQkJCQkJCQkHg2bJGACuDdjed9aIbXToKw8e9GMAc6NEJANJ3169h2r5BceJicNKC+Lz4NLLjrQ6Gd3ToxybMxHJiwHpL/7t6aMK+nyXd5cjAamJj8FyD02X+P7vz5f5tnGggh0M5MmAN+YlgsQPV/WOy6gYnhzUwoeyD+G/NqDKeq1PKHBXMwguOHCTka2fnBotD9f3twroexa9x8YqnyunRPqNj/x387GMHOG4fvwpr3qLHg+vz3lfK/bp1y0KcE5JWV30UIvhoghAqTLvG5Z+sW/3us31F8fa8mmPnZe8XldWS58L8JEkDxew+GGN8Jkieeg/FVTD9+8gyhMp2f96UKMLsZLnpUxW1GqVwXEVA+XGtU/x2eZrF3H8H6EX8+m//eSQuEtND5x5zbj8L1vG6DEPWVZgG5GB39KaS6hISEhISEhITEn4otEVAuJqcN1ofj1IB53
kGTEpADHdYju8a7NaAeKWgQArKnQC3t1+HDvtCg7DfQ0AxYjz5m1wa0AwXqfgPNzhDTkkCjNn67GJ9QKJdZEioin8aPYvcKfozRPWiiuddE52oK37NhdhQohwoaexqMm3WRboxMaCUaK/MgI5Xl4d/qiyyBn2O0CUH72kPosdIa9aABQggaByrUoyHLjuEEVPukA7qnoHtuoMuvU95PSwiMAM57FZRQNDsGzIGBzj4FIRUnMfHnKQcK6L6K7pmOdvS8xLoI3v+Xg37UG+bUgHGqoEEbaNBUECZ0XTojiv//UQedA4rmWx3GmcZkPk1UPo7RoQSENqGdGdDfNkH3VKhvqjJF1q1TIcIfI6iEFPby2Z7sphC4GJ8qaEa6/eTBHnSgHKhQ9hrQzidw15F9+MhkX/1U9R6LudfOejAvdGh7bG0XdoCTLccddGhMdggBOehjGtM/97rDe3Rp0M91aPsUjb1GJQEV3PWhUgK634ExMGF0mqAZfcj/nUIIGgddGINIbxvofnGFdF45UPjfFJh3KZtRKtcN9v5xuf7loH9IQWgTnfPoHWgp4Z98dw36RQ+61gAhFNqHyPZMMTxSoe5TEELRPKzqfxbAPicgpAenQk4820T3UGX6dKhCPR1iuoXTIv3/xjCO+TOPVKhvjbmurwPOBQEhxm5kAEpISEhISEhISLxIbIWAcj9pWQLA54F94ourWImGf6uDnk7g88AnCjz8MET4e4r+m3jPmwDOQIWyR0BW6W2TQ0LVJZ9YQ1w2BueSgBBGGEwiwuBLF0S4X0kVeEAVb4zNCYZ0lodzEVuHvGCypASPnoxjwX8A56JVHsTwMpckycGa4JZ+fefPa13YsQyrAM6lklxXwftPBy0Q0sYovnZR0Fz7ugICiijof/MTv0sSHD4mpwSE6pjETqDyvzGCoEwP1q1T8TnWb/2cv9eU3ccxuoeMECErnig2/aCgNZjOx0eIAj0iu7wJumRNTdN5GVLVXAV2D5S00PsWJzRtGJSAzn/L5z1lH4JvPbQIWczTExt/vlyXEVALmY739mJyUVZyxnpdkXdWbL58WOc0SUyX6vwomcWVsRlcrt/0YMeui4ivxfzyks/UdYyQKzmFMZrrk9S7/9sBJa2EToiX4HGdfWOWlp96XzqgpD1fU++mi0bi3V2M/1IZGUx0WGv6EBJ870OJyVzw3YRK11vOOP3QEi9XlJCQkJCQkJCQkFgCWyCgpujT/Oa+rPFpvORBJFhmwY3xNUD4rccC0kQwFQX/cbKJkzFvjESgUxsxEsqrTT6FCO9NtI5GcEMX4zYBISqG8RK5KMBe0xHgrKRoEQR5122Q4xFG5/FAmQWkc1KkJgHV+5Z8ZmXA9zuA7/sIUhkrjNToF2cfFPWkiQL4KNNL6P7TglKzNJEkel3B/2dOxUvJtzdBJzfjpkoP1q1T8bUrID2WkV1OeK7WV2YK8w17H/dfNj714yw7V2sJ9qOspYoypMBnMlYqE/xeF6l5T+mX96WTX/Yl0IMp8H346cybyt9xmU7YzJx3qqHzGZshKteFfYcc9Eoy0dic5TXM5uRaTF/FCSiRtfcwOSEgpIsJz6TybnSoxwasxP15pukaD9OYvqcgpIU+bwoffDfRPupieL+OjxXxuXppff0kJCQkJCQkJCR2CZsnoPIIjQg8I2SRcSEWLAc8UGJfbNMZGzzASjjSPDBZx+lGv12M2oRlxdTtFRLwgDHKtEh9bWeB1Rr7cPDnsGwLFiC3rz1Wchc9+3GMdpysq0lApYMVsYCP93w577JSkqMoQ20JAioK9hIkUcX9C2UyRSKIXlf5/xFS8l34ThV6sAGdWqxdQQC6hOyy0s5VT9QL4PsBwtCHdZaTxcjJjvVkbggSUGE47wvWO2vPZaxJcwio9Lyn1q6QeBVsAs56Rxno8jFEpXNlv3O/sKydxkEXxtUY9kOaTCt4ftGY0vIoKtf8OtZnKo4eOiUlqGVktXORlM31ElA8Y4sQX
oLYhn5lZw+f4DrSGpSUu9aE+1mbP5fuq+he5vcSWwWSgJKQkJCQkJCQkNg0npeAyvytzilJUSZGKriYl5nFMiJ4UL6OE378b30oVEP7LcuEWorQ4plbNPGeUQ+SdN+mVcCIEHrp8EwhnjXgW9B5IO/f6iA0FsxtnIBiWWRkrw3j8wS2bcO2bQz/WpaA4mTPyYQHjgL3f8kE1Ao6JRSA1pBdRgasmHUYm5MeIUlZDaMsv+rSKdFnCBFQvC8YPezCvLa4jE3QO9ouAcVK1Rpon48w4XJuf+wKkQjBTwfjwaKPGtlrJ7NptkhAsT5TOSjoV1ZKQKX+tl4CKkT45MDkvbbmhFA6Y/VbD4S0YNh5paxL4tcM43ON9dCKcLAOmV9AElASEhISEhISEhKbxuYJKP41OLec4nsfNOHw1iCgijIxuBMdz3ZimRgE6tsuuv/00D2gUAflTW7zwMinqOyuuDF5FfIzt/gJZPMMjwDTqy66xxroIW8CHHqYnKnQPjgInizolIC+Lyer3E8qyNEIzq0OchSVhLBsktaHKZyLVCnXhgkothap8q1whRK8KOONy4zQ/QMbRq5Mpogj0euWJaA4WZrtuVShBxvSqYjMyZRY1ZZdti7mGwLyPw2dv3T0/tZA97rLnxR5b6KVLvELQziXLCMlyjYJ7ofonrahURVDPhbvRof61oQT+LDOKAjtFwTunOSq6CfF5kCHlSh/KyjBqyCg3E9qfs+0KgIqkoFEOaLA7/LwNMXohILQGGm/CgHF5Trbyy41J3xN6/a8Y/Of12eOl8jF9E6cgOKZlJmy2Qg+XGuM8fdI7gP49yPWCy5lt9j4mtA6XegXBjTaQPeL4Ml+OfDuxhhb7qL34KOF3kHq48ujBeO0A+1NA7rFxsjK9HRMfrI+aoR05qWDeWAlrq/1FEAJCQkJCQkJCYltYAs9oHhD2szX2gDOZSt1Ilv9YDnZY4U35o01iQ3DKEhVoPMj6FlQkg4gy5EknxbPq09C8UAnNcbgqwFKCFoX7F7BXR/a3zbcr8ai30miZ4rLAt6qflH3JlpEgXKQJCz8Wx3kqI32m1QAuGECiv09HQgFsN6JEVC6FeS834IQEbs/l8lMj5b0UeSi1y1JQHGSJtNA+1dVM+zN6FREXuVmVtWQ3bgc0bcmPyGMjUO5mmXuaV+ZMD+XZxMyoibVXJ43oyZvenB+hQgDB33NgP3IiEOmJ7x0j68hC7IL+kVFY64gdZ1LkkNizTBcIgOKyW/24IHIvhUSSfw+nRSxEVh6+e/4GmeeZ+lJvV2FgCqSa07ozucksNGj+Q3kA88rtql8zjLE7c9spmudJuSsv1O+DfItnWU9JdadZ+EmSru57lMNJs8ocy4JyMEQs5z7BvdjmIMhJg8FJNx/Q9a4PWXTkqWGHsbHGkYPM4yOCFrnrKH99ENrnoHImt+Xl8Mykr6cpJKQkJCQkJCQkJBYBVs5BS+8N6EQAuXdCM6jD9934Vx10SAUnet4Fod4sBxlYpCDPgs+Qx/TgQZC1eTJYxmygDd0fdOHI3jcdD75tLh/LRIqytyKv/vjBPobgsbp4kQ5zx5ifO/BOiOgUXnZtx5I7HQo77qdE9CnwTNR0sEHb96dCcbzCChO8rQ/TuH5PvwgXL4ELyUL3oON0TuFl5VUE1CEdjC8c+H7Prz7MfQDkjzWXvD+7FSpmEw+OovrYrIidt2yBFR0qhaF9t7CzPPhPVjov+V9Zsr0YAM6FclmIQEjKLthGBEosRPJeK+mNLkVZScWn77HdJv1f4qdfPbbxeSsBbLXXeilZ2N4PYV3q4PQDs+2YllNSlQa+HOMdgEZEJEzxeNYjDm+Zu79ZLFmdQmo0MPkhIJQDX1rBs/3MLP60DJ97HL0+oCAHOgY3bnwvRnszzonK8R+N773mB49WOhrKUKzhs7n2QxRuY6uUy8mmHk+fN/D9NqASim6N2XyEJ8zH97DBL1DGtsPBO1RDGxPySdgGAGVPHXSu
zXQSp/Wx0m21nw/yGZlZW1zSQnpf0MopJnQaf+/IdpxAu73DJMrGx7/2ND/HiAixxblgQ76tKxRPx9nehw/J9CP2jDv1tfsXEJCQkJCQkJC4vViOwRUGCJ4GMM4XDRwJXsajOtZirQRDZajTAwN+nkbjT0F6n4T2tkQdjp44IHv4qs4c/qjr8TVY59ieLQ4br5oPJMzbV7yUzoPPLOBnhgwDhtoHipo7nfQv5llx+Nb0GOBhvtJTfTUcS6TAVERnEuaE+DwLJp0YJTbX8jD5JT3iomCuRWakLu3BjtCnBAQ0oB2PoF1pWYyaxKIMqA+JuWIHhqwHpe7f/Y6C+OL7JxUX7c8AcVKLduL3i60ic6VWZEBtQmd4nqlFffBqSO7jORdyCojmnJOLvtpwXjbRIPSYuIrypyhHRh/q2jsq1D2m+jkNmJmZNU88P4xghrvx3PXLySLmezmna6WRt6a2Ri/IyCkx0lOUQIqRPhriuFxYyHT+x0Mb4ZoV5XSPVqJ9W9oBia3Q6jpTLGMXZlimOpj1Dg24TzFrxPX+XybkTdHEwyPs3Pi2SbaeyQpxzdV/fp8OIPY/QlFs5N+h3oEVGD3QAktmHMf06sulP0mb/auQD01YT2m7O/3Pmhc3gpP+uNz9LELZZ+ClmQeuTcGtP0mFP5c5djA6HuWnJt+aC1IxCCeBcjXqLS/lZN/sibPwFpH/0QJCQkJCQkJCQmJrRFQc/zy+alWK9yDBzyLnkbFyGRi3JtokRYM28P0eszLg7aHqGeNSN8TNvYoeOJZIPNyjylMbV0NnsWQe+z70gjYse/pE6SKkAp+A79KjkTvv+7rlsRvfv8wRD3SKFyPTnGw07a6mDxl/yYuuyWljD9tDK1sMOv+2y5uvn/HMpOqejOFYTgPvCMyhBFfi/eZDrSC5tBF5Zaia7YGLLmOS+slH79fkgm6ss7XmKNqnc5D9TsIg5cErnJ6HSsVXRBe/k2Xk5oe7I9WgWw56BWVhQqD6dw884rvc1F2ln/TLSeReFljz5aZThISEhISEhISEpvD9gmoNWCeiSFaqhfLxJj//8ME3TWfIlSNWA8dgS/yyeCZ95jhBFTw1YD6frq+4HfXsUxz5T8Awd0Qw29JQsS3DNDnCgZ5aWa2/KmG7KazPub/P8N0oOSUuPmYnBZlnsT6PxX2pso+Oxr/7KO60KFfNozDWLlm5r1z+gpJvCpMB60VTlRM9huL///s3oRS1K/v3kSr6vS9SrDM3mbUjN/SQUiTE1AuRu1O6QEA00Fr7afqSUhISEhISEhISKTxRxJQUSZGdbDIshqUWLldcNeHShVox93ksePbQJS5JRrgBC7GfzXROOxCP23DGPTR2W+ifaajezbaevbWs+KFElDuF9a3qdkxYA5M9M7YUeuN04l4Js66x/RJy+kFU0N2v5toUA2j+UmELianDbQONbTPs1kgwbceWoWZR7ycsaKB8gIB3H+7aO6p6J510T430e800TzWoZ/qGBXo/HTQqpf9JPEywRvbF/efKsMU5h6F9nnRl8/90kXjjQrtOFsqzOBifJxtQr8MfLsPba+B9pmO9mkf5rmG5mEX+mkX/a8l2U+cfF3HGCQkJCQkJCQkJCTK8GcRUD9G0GJ9SwipWTLzjLD/SY2bEPTs5x/XH4MfFsyBCeslHhHuORhfGegeqWif9TCyS04A2wrYCYsaPzVxo7IbOOjvFzT4t3uZ55J/7PW/771ZcsiAxGtD8K2HFjVgb4Hg9/5t1zxFdd1gDd2fdwwSEhISEhISEhKvBf8f5ZErlB6Qj50AAAAASUVORK5CYII="
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "设池化窗口大小为 $k$、填充为 $p$、步幅为 $s$，则输出大小为 $ \\lfloor (h - k_{h}+ 2 \\cdot p_{h} + s_{h})/s_{h} \\rfloor \\times \\lfloor (w - k_{w}+ 2 \\cdot p_{w}+ s_{w})/s_{w} \\rfloor \\times c$，计算成本也是如此"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABLEAAAAbCAYAAACDdx3PAAAU40lEQVR4nO2dv2vjytfG3z9l/okpTZqwRdzEXUQKwy0MKQyBYL4QtHAxgcWkCGbBiIVgLiwmsOCwoIWAigtuFqVYnOLiLRYFFlQEVARULKh53mJGsn5rpCQbJ3uKTxFHtmbOnBnNPHPm6P+CIABBEARBEARBEARBEARBbDL/99wFIAiCIAiCIAiCIAiCIIgqSMQiCIIgCIIgCIIgCIIgNh4SsQiCIAiCIAiCIAiCIIiNh0QsgiAIgiAIgiAIgiAIYuMhEYsgCIIgCIIgCIIgCILYeDZXxPphwZgYsH78rnt6sC8MGBc2vOeuO0H8AThXBoyJBWcDyvJ0OLAmBowrZwPKQuTxZ/jhE/Ooz2v1PuP9Z2I6MbL39lYwz8XnL6HvkQ8SBEEQBEGoUy1i+UsYBxq0/SmWdX782oC238Psv/T/fNjvNWhH8+yE7acJfV/D8MpDcD0GYwzj699lDBfzAwZ2MIe7AQ1DvAQ8WG9b4L0cXyYqsc8YGBvDDj+7mULjLYy++s9etsfDxpgxsDN7A8pC5PFn+OET86jP63SfyR9nvSsdnDHwNxq0/SEsV/7vzoLOGRjfhravYXjlPr99Ksj4oALetxn0gw5ajIFtdTA4M7G6f/66EARBEARBPDUVIpaP5fsOGKs/wQp+ztFjDL1P6V1QG2POwFgP85/J7/j/DsFYG8ZNQCLWn4prYfhCFh5B4ME60aAdm+QzDcgTD3r7PRjfXpN4QCLWprMJfrg8b7BRtEk8tYiVGWfl87o7y2wguJc9MNbF7LdFcdfBhfW3Bu1vK/HMqCti+d/G6DCG1sEQsy8LmOcDaJyBH9KziCAIgiCI10+5iHVjoMM66Ow2ELECB7N9BnZsJY/n3RhoMw7OGfQrL/Gd5XsOxkbiPiRi/ZmE4uflSxCxiIfQJPrg5UEi1qazCX64CWV4EE8qYtW7ZrNtmT/PqFdmD9YxA9ufYhX73Lnois3B2+euI0EQBEEQxNNSImItYewydN4vsWg4KbTPOBgfJ3aXnY8a2M4Y4xMGdrKAH/3PwbwXE71ik2Lvh4WZzHsx/bKKRDH/P1PkwvieurdnYzYxMF2UCCG/XNiXU5Ez43wO23XyRaxfHlbWTFw3MTCznNKcWbXKdLeCdSHzdkxmsH4kRb2iPCPe9QzGZAbbq2gDz1n//vkctpst5/wmGW3gLoRNzO++vL+4j3s9j3KPzIrsWnK/XLtPpphfh78l86C866PNGNqHo9I6Rja48+BE7RP7vcR9cmwbJP2qkX0yedRifxfZwrUxD3O1XFhw4vVTbO/HqPsaWeYrJ9V+Jla5tvcTvjC9tOH+anZdZuGWrn/s76IxINE20f2kLWL+W9ZPyvwg0aYpuy7y/DtuwwsLjqcoYiV+OzXO3C8xnxiYXafbUZbt0zI2jqb6amq8ivuOfWEk7RMve6JvFttrZjnw7sTYlsmHVObrD/VD1XFZ4brn9EPRHgb0LgNjXeiZHE6q/S20+QLTnOePc2XAuEz6SSDbzfwvNsYpP5N8rL5M13bKFbEcLM4NGB9MrPySMlf2meQ4K2ymo8sYWFdf572Sz9iELVN5psr6RmHdVPxZwUdE3qsR+jsMbKePUaxPx31QZQ7hex48LxUpeD0GYxzjb+XjHUEQBEEQxEunUMRyPnbBdsew75vvbHpXOhjTYmH9YheSn9nifzz2m54FnTFoH+UEXk6Ke4d98K0OBidDDHZbYEwIa34QILgzMWAM7cky577yWGJe2e5tjHcZGGuhczTE8KiDFm+hxVMi1r2N8R4H49voHo8wOu6ixRj4XwaWRbknFMvkX49F+P+bLvTTEfRuC4xxdD/EFhoFu9viuET2OGac9e/3MZwM0X/DwbiG8XU48ZWi3a6xFhllLhGeEhI7ux3wNxoGxzp6YRuc2YkFUfX9AgS3FvRdmb/jx
BB2ZwydtxbcYInpvgZtbzs/z0mQZ4M2tL0WWrsDDE8G0N5wMMbR/zjHeJdje28A/biHzhYDYx2Mo+NBIi8bZxzb/SGMsLysU88+mV11+feOBm0r9C0N25yB8T5ml2N0+Da0I32dy2R3jKVfr70fVvc0YZk76PAWOgc69FiZk7v6DsyjVtRvjJO+uG5Xh9Xgusy4kq6/yhgQtueZOPYc2qOzxaHtdSr6iYofSPvs99Hf5dj+S8fwuBvZx4z/9u0cfZmLp3s8hP7XNvhWS7RzmYh1a2KwxcC2utBPjfU4cxjmAJJlODKTi245ZnYvnIzd4+NV68iMFvKh73R2hb9s7w5g/kz3XwPDvuiH6X7uXPZl/+xCP9HRfcPR2tPQTvmt83kg7t3VMTrV0d1iYFsDmIVRIjX8UHVcVrzuOf3QvRpC29dkP22hs69BO19m2rO8v8URx/XbH+LPHxkVzUdYxMQk8Uxal6vOM6mzG9azA+M6b+xyYL3t5Iwhad9X6TPJcVbYbJ0LSgttJo+iJ2wZHdGs7huFdVPxZwUfEUdGQ59O5utK+GDDec3qHw2M6bCqNrcIgiAIgiBeOPki1o8ZuqwdJbZtHJ5/K46GRccG5aJr+K8vj43FJmTfxuDxv+WkkB/O4UQ7zz7s0zYYG8rJuI/FCQPbiQkNuZ8lWU7aYKyH2W2yrP2EiOVjccKzk3B5XTu1uAtK75/67H6BIY8vUgXOpz44a68X0E1FLH+BEWdony5ii14H80OeFGVk+3Q/Omvbch3WXbINkr8TLtRibaV0Pw/mEYuE0bCs/tcR2oxjtJB1VjxOKGzA0LuIRy04mHWZ+Dyei+3exmgnFuX3Y4ZuJl+b/G5cxKyyT5GIlfYteb/0UQ9R91j/qCViNax7hoIyh/0hJpw4H7spgSdA4ElBOGY31etUxYPyMSCQR5RZcrH9S/pfWT9R8oPQPh2Mv3oJ+/Tionvo3zsjLGKLSP9a5K4pFrF8LN5xsJ1Rsl/8OwRnPLKF92UAxgYw79bXpEWI8DhRvB3F/XnkY6HvdN5ZsYiedZ0T49HHbtJ+cnHNj+NimhcJN1G7hXa9SPrheDcuADf1Q9VxWX38fnY/zCtDjX6U8acTBhZPgv5jBo0xEaUTG1vs09jv1Hwm8cNZMkIuYbPwGZEqewbVPpN3BK/ecUKVvlFYNxV/VvURpeOE9ec1oc06H5aZ/xEEQRAEQbw2ckQsEYHCY0f9mueYWMLYYWCnYqIpEreHEzqxOxwuAp1PveRkT04KR1+Tv5le0PuLUVL88hcYJhaX2TKNefooY4DM5NI10U9EOawRItgwsasdp6pM7uc+8hPPSnuFZWsoYonFbc7vfx2lIuPkQon3Mb8ykhP6kvtHO8Vywqx0v0JxaoXZgYbuP3LyXUvEytpA+Gq6bWQOkbBtf/nwPA9+1fG2KvsUiViZBaZccKV9Toq6UV1riVgN656hOBec8PNwZ1/0G/4u3W9CgSX0K9Xr1MWDqjFA5NLL9sdKsVfJD6R9Mm+gTC2i5XiRHXeqjxPmHg1K9wPZ5wZfQv+Ti9pI3BFjR9bu8pi29L3IJrc5ZUhHl6baQ4xbWrafp65bfmhnon6CIGeMb+KHquNyjfH72f0wrww1+lHGn/4dJiJy3Mse2MEMsxMRBR3+vrHT/JmUtsXaZqoCVp0+81ARS61vFNVNyZ8VfUQ1J1adeU0kYL216C25BEEQBEH8EWRELOdTD4wPsSg7bqGMXGjtizcI2Wc8ERGy/NCWkzm50I6/WUdVwAmjgMLQ++sxeMkEv1gkSU0uyxLVfh2VhvVXlanMnvZpbLe1oYglfj/MCRJD5ptK5y0JI3gyURKFNpDHU+TkX+l+qol/H0XEytrWPksvHGQ+qZOBOI4SHUNJf7fEPnVFrMyCS3z+uCKWSt3L6rBGLIarRMggGXGpel1eeQvEg3J7FJdfRTyo9gPFNi30b8WcWK6N+fkI+oEoQ3isdm1HOUaGPigXtZGoJ
e0ucskl+6Eeiywrs4nI5TPEQNpBk8ehwjoVjluJuofHAkXOn0RZjrsl7aHoh6rjco3xexP8MFOGGv0oQzziWZard+mKzYbw2XKbjIR+6DMpcZyOMWyrvMhAuc88UMRS7Bv55VH0Z+VntWJid9V5jbwuHUFHEARBEATxmkmKWD9N9DlD/2IFz/MirHcMjI1g5e3WVyCidPowXbEbmpiUX4/BmQ7LE7vOiV3GGgKOfcajSbZ9xnNfu72u4yOIWAqCTFmZShcM8f89SMQKc4JkmSYSv/qwz9rIHEMrrae0lRQdle63USKWPOKz1cPwwsRiscBiscD0f3nfLbHPKxexEomCy9ol/j/V6/LK+9vFAxU/eHoRKzzWpP3PwNwSZVh8HkFL2VFEZ4gIm3S0TWhbkUsuh7+tUhFL5LpqoXcygyntsPhn0FzEkjl/shTluVP0Q9Vxucb4/fx+WFPEqhwj17knRQSfjLDyLOhSuErnpHzoMyn8nLEwB1heVFd5OxT3mccRsar6RqmIVeXPjy1iBYrzmm9j8LzIOIIgCIIgiFdMQsQK86WUUvdV8TIfx/jjDFp6YiujCUaf5uil8nXUEnBuDLRZG8Y3IZTlHSFJ3zMblp+aXMr8JmI3O1WGz/3qCI+SMi0/FB1HdGEeVgtpVYui4t/P4n8TC+j+Yc4xo8JFhjySKX1B6X7SnpWT7d8gYoUvHJh+r/5uqX1euYiV8PP0ixfiyIXU+LrGdXnlbSQeFOeKUTt2W+UHim0qx7nseFElYsnjWm+t5FGnvH4goy70K1fk0YofTy2ze5VNwu/+s0pen7K/87EgcXTiOhkxtl+ykfAQP1Qdl2uM38/th7llqNGP8nA+amD7M9hXeqwtRNu0PyxhnyaP1z30mRRFYn1yEL7ZOJ3/MINyn3mgiKXYN/LrpujPTyBiKc1r7lZYLBZY3ZWUjSAIgiAI4pVR+HbCOM2PEwYIBY/WVitnIign1Tvt7OKoloAjJnntbhftyh1gmUw2s6spj8hFk0uZR6PoCFlZtFdVmW4MtDP5lYJo4RpNVgsm+WIxWbwoEhEbPGdx4MN1Y5/5S5Gc9tCEm5fcWbaBbqV+Ry4Qw/Kr3c/Oz/ESHpv48Jg5scqFHPHdPkw3WVbrbfpIR4V9HlvEUmzvJxGx9qdY5fWTnXRi/vQiXUaqRTljVK97LPEgzBGU7ffibV3F/UTND1TbVI4X6bxnUjQvFrHE7/D3qYTM36eZSCxhMw520Eefx16GEASIjr3mjUt3bjSG5fqO7HP9z8l7+ZaetL8ct9L+mb5OJNHOG4c9uIWLbVU/VB2X1cfv5/bD/L6r3o9yuTHQZh10dpPijXelg+330NtJjTOqz6QKESv6XCbQz/SHBKp95qE5sdT6RlHdlPz5KUSsWvMagiAIgiCIP4fGIpb7RYd2YJTvtAYBoh1qln7tt/ydMPornTi5ZhSSWOiXLdbXiOgahs7bGexbD96tjdnbTub73pUOzji67y2sXA+eu4J5qoGrJK0tLZMH65iD8S7G1gqu58H9bmK0x1O71zJKY3cE87sLz3NgXwyhcVaxKHJhHnIwrmH0Rfy+5y4x/1sD5+Ebznws33eS0UU3BjrxY3PhERHex/Taged5cG/m0OUu+9Kvcz8RAcFZC4NzG44XszvvwwzrInfN2ycWHM8tPL76ICFH1jNsf/f7Yt3+sTdEVdrnsUUsxfZ+EhGLMXTemcLPPQeL911wxpPCRspunufAPh+gxTj6l07t6x5LPAjFRrarY3btSLvp4g1nZf1EyQ/U21T493q8cL9bGP/FKyJY02OBg+WXMbpctElGzJUCcvwoWHpcax1Npd09OAsDvS0GTb4xr3ATIG4/d4VFZL+4/cN+HpbVxcpalzW6LmyPrUE0bni3CxgHLbA9IzZuNPND1XFZ9bpn98MgjITqYXrjwrvzhKij2t9ykWNJOnejfEFANqJO8ZmkKmLF7F9WVrU+8/C3E6r0jcK6qfiz8
nxFRnbtDGHdenDvyt8AXTmv+WlheDSEdZvzP4IgCIIgiFdKYxFrda6Y9yIIJ3I8//iDjD7JCFx1j9KFr8GuiOAJca5CcYCBsRa6Jxbmp9nJovdVTHTD45T8TR/G14KEujl1Ky6TB3vSQys6qsmx3TdgpyIV/Jtp8v57Q1ifRpWLouCXi0Xi9xla3SFMOdkN32jUTRyxCPM/SeEmjMT6Z47hHk+WIT1prrhf+PuryyG6W+XXLCcaeGYBreYHqkJOtv1NWOdaVHcl+zy6iKXW3k8iYvUMzBPt10LvfJmJovC/J32BbXUxvFw1uu7RxIMgQHBrJe7X6g5hnFYf46ryg3pt6mN5HrMh30b/3MT0oEzEChDcLzE9aCXGmOliDp2x6M2ua2R0xiS7IRAEAbz/Unbn2+ifL8sjsQrsZ17JaLB4Lri8sk70bDt5KyFiJ8a3KZbpo4gN/VB1XFa5bhP8MPhpYhCWM+Zrqv0tj3hOpfXnMsIrdzxQeCbVELHEeCnmCekju/X6zMNFLJW+UZpHrcqfa/iI/81Yjzmy/IXR7hXzmjAKWrcU5yQEQRAEQRCvACUR60VwY6DN1lE/avjwPA/+r+prfc+D51VHX9UvkyiDV5G/ym+QVD/x+02+m5qYq9lA7X6V9blvYO+GtlFp/99N8/auS2qB+EvRJqrt81vaMd9uam8nfAI/CG1Y93uPaat7r1EZlP0uXtayxf8vtfGtqR+qjsuNxu9N8sPf2o9U2+wRadpnmtCwb8TL+WDbqNa30byGIAiCIAjidfNKRCwfixMOdmSm8p9QmR6E6hsFiRdMyVvhXgwOzMkcTnzx/2uFaVEeHKIhPuzzKez0MbQTXp2jqRLyQ4JI8grmEARBEARBEE/ACxexHFgTA8P+tshz8u337bS/rDI1hESsP4BXIB7c2xjvcbCtLvRTA8ZkiP4bkaNNJXcdoYoD86gljmGdGDAmI+jdFhhrYfC5KkdTFeSHBCF4RXMIgiAIgiCIJ+CFi1hLTPc1aAc6pqp5qv7IMjXkhwVjYsCityK9YjzYFwaMC/uF7/Z7cKwZRsc9aPsDDM9NLOkIzpPgXs8xPRlA2+9BP51h8fMxFtnkhwQheEVzCIIgCIIgiCfghYtYBEEQBEEQBEEQBEEQxJ8AiVgEQRAEQRAEQRAEQRDExkMiFkEQBEEQBEEQBEEQBLHx/D+yASXrrR63+AAAAABJRU5ErkJggg=="
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "最大池化取窗口内的最大值作为下一层输出，平均池化取窗口内所有元素的均值作为下一层输出：前者只保留一个数，突出最明显的特征；后者需要窗口内所有数参与运算，兼顾了邻近像元的影响"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABK8AAAAbCAYAAAC6EKyUAAAXDklEQVR4nO2dPWsby9vGz0eZLzGlcGNSRI3F03hxsZBC4EJgMOKAUeAgDEG4MCIgloARgSAMAZmADIYt/rBNWBdGLg5KEdYQ2MKwhUFFYJvrKWZWml3ty6xebDnnLn4QK/sy97zcM3PtzD1/hWEIgiAIgiAIgiAIgiAIgthG/nrpBBAEQRAEQRAEQRAEQRBEFiReEQRBEARBEARBEARBEFsLiVcEQRAEQRAEQRAEQRDE1kLiFUEQBEEQBEEQBEEQBLG1kHhFEARBEARBEARBEARBbC0kXr0ygtsBrN4AbvDyaSGIreanDatnwf75UmkI4F5asC5dBC+dF8SrwruxYPVseFuQlq3mxds42UAQBEEQBPFcxMQr/7oF48BIoY9xiYf6N+2FZzRPB7B/BC9u8GvHv6qDsTqGv14+LUQJ7vsweAWd79PnuY9AeNsFYwzd2+i3APb7Cnh9+EyigI/hIQM7HMJ/6bxQ+e3BPm/CeMPBGMfuuxb63/V8c3A3RPfYwC5nYKyC2mELfcd/eZv+MNxzBsa6cLcgLYJnbjuPNlo7HPWvXv51C238FfIn2PCCjC8M8J0O3OmG3rGCvyQIgiAIYr3ExCv3nIHx3dXFq6s6GKui8cGC1bNg9TpovdsFZwyV4xF9TV4BEq/KMb4oX383wn0f9YM6rLvy4tVS9xHp4tWpAeNk9Exi0jaKVz5GRxyMG2j2hrDtIbqHFTBWQze3jk0x/mQKH37YxdB24NhDWH8b4IzD+Ohi+uK2/TlspXiVbDu+jfaBgfbNBsTLR/Hs1rXy7Ls+jAMD/Tvlutcm/PwJNrwYPux/DBj/2DF/Ov5ch3FoYbwR8WpZf0kQBEEQxCZQxKsA9gkDO7FX3uKSJbAE/w7Q4AzVM5rorDtviXS2bxJIPBsvPincQvHq3kKVMbRsZeI1ddDhDOzMzbxvetdFjTHUzhd99+SzCcZq6P+7Bfb9IbwKv/VriDpjqF8908q7tPb84m38P2jDi/EC/nRJf0kQBEEQxGZQxCs5MDhfvUPOE1jE/5kYJOM7PE5gX0YrtQawf+Yvy/adPqyejUnsdw92z8LwPv5FTMSJGmGifJnzb4fo98T7BrZXINiJ51o3HsLAm6fzYoRJWuyp2DVDuH7aMwN49kDa28fwNn0CME+nuMbTFK+CnzYGvaI05KXJwsCeIPhd1j6RV4PbAKHvYnghr7t04Bc9KyUfZjG+HmX8IDXeV8694j4LLZOBMROtqPzUd8fSZ8PLjSOm1IGM+9Q871+5cXuTcU2Uv2P3XU/idTHnvvQ6nFevsmIwKbatw9YkP+1ZucXSnLHdbPpLeWdO3S28LmXlVdx+5e/fPtyr/sz/OCnvVG0e2B6CRxeD3Fg12ZMtNR9idc930I/aT1oZfZso4pFmWct0CjunCIIA098p6TzKWpEmP2zwDpy01Q2PIzQZQ/XTWM/GZXyqTKfz2YL1ZTGGmIgRpfr4aez9i3VUty0s5kVqvmbYnexbhF+y4SXKbvRvvLyzxCs9v5704xn9Wyn/p+aXzKMPDVQZQ/Wokx2HUdZn+0dKeV2N40KozM/Rv1OEgfj34DaY/ds6McEYg3mixANT2niuH80gPz/L+Yfwd4BJVr6v04a8cpv1D1NMrvsasbQ024l2O80f1yzbn4v23UHjLQN720BH8ZFZ8eEK+wedPngpf0kQBEEQxKZQxCsXXcZQv/IwfZzAdRyMH5ZbFp27Oiiw0WIMxpf55GB624XBGfgbE62zDlpmBYxxmJ/G2Su0brvgrArrXvnt5wAGY+AfnNgEzz5RJ5AeRscVMFaBedJB58REpXA7o8gbdthAg1dQO26jfVxDhTGwvW5sufrclgbavTYab8SS8+6tkpdPLrr7HIzvonFqod3YTdl6M4V7XgNjDJW9JtqnTdR2OCo7vEC8msL9KLby7
DbasKI0sFo8DQt4GB6JNEX5sssZ+H4X7lMZ+2Re7dVQ47swjltoHc7zKvtZUT7EV3dEW1BrezLexF4To1/F90Zx12o7MjbPgQHjYj659r41RbmbLXTOWjB3GNhOE6OHgjqwZ8DY2YV50o7ZNfzSAN+poX7SQnNfpIWrg9ukkCL/rh+J+5qnbTT3KmCMofZRqfcZ99X2DVTemGidtlCP7jsfYnDEUdmro3Uyj9HR+BZNArLEFGnbTLhe0daUtsoYQ22vBv7GQPNETXN8Jc+sXPaa8/rFamglxASt6xbEq6T98u+DBhp7IpZJW9Z7xhsYKW3Mu2oIO2Wem284KvsGqrkrJtLyO+5/rKju8QaGDyHCcAzrLQM7HsUnrtEKgJtA04fId7+tocbFdvDa3xll9CR8clJ8Wqgfp06GPw4wcRw4s5iGRTaW86kq414VjDUxelR/l3k2WzU8f3/tuA3rtCHKdK8Fe/Z+3baQUaap+Vrct6j+bKGdXs3r7qJ4penXn8aw3qX48Xf9WJ6W939qfo3RPzBgRG3/jQHjoA07VUhz0eXJuuVhcLAohgY3rXnfpq7qktsTDekzKnvKVnBdP7pUP6nvH9T2qJY9f2dh/BSuzYbCclN8rXhPDVamfyrRTt7WUOMixl0rinkXa886Pmn5/lxs/4/eK0JbRNtV04TeMv1D6bpT6C8JgiAIgtgUc/Fq6qDNGDjnYIzN4PsdOI/lHpq/tS0xOXhy0OYM/CgeCNb72gBn1WzBRaZXDejqfTFEurk6kHHRUbY2eJcmGKtjoAy6prdd1BifTwyz0pwYrE2/d1BlDO3/TWdp6nCG6pmjTDylKLRnybhLUzgfONjbDhzly6WYHNfmYpycrNYv1Um7fFaeePVzADORL2HoYWDmL7cXEwcDffUL+Y8+TMZgRmnQsk/mVcI+kccM1d54IU2xcv9ixuwTdYmh9sFWvgjr3RuGGSsYojxS8/bJRXePgWdum82w639tcPm7Ksx5X8UgfVaeGSIUPxrCm9k1hXtWBWPt+aQu4754GUzhnHL5uyoGeXLiEZVNSfFqWVuTZKRZiLPKfbJc4oJWIK9TylX3Ol3xitXQVQPwPoiJ80xglyuL+IkqKAUzcbmMeDV1OuCsGg/AH/lAmf/Cj8UFGvFbC3ag60PmtnXsvG1dkWjdgp3l5/0RGrmCThwdG7V9aop/MxhD8zpY+C3y38IPJESdQLTvhbJfVrxKyVedviXyZ+l+fV7mC35L06+Pe9VFUUHW55kfX8r/peSX1rbBKZxTBqYGepflxRiPtR33THl+2rNzttwV+tGUelScn5r+IfK/Kfne4AxV1U+tYoNOuc2eNchZwRj5lBLtJFGvI9vmIrveuGa1/jy9zWa1Fd3+oVzd0fCXBEEQBEFsjLl4Fbiwjg00Pjrwgimm0wCe3YHBy8cYKCNe+d8a6dsIo6/pmV/75aB4NtgWA5v6lwHa6qD43kKVGfL54pnxlVkhwtDDsJ73LpnmZIyDxABbCEAptnzvgEVpkCvPzMvkthQhskUD4vFHnjqAKox5lbrMvTiGiiiHhHgVhpgGAQIpVGjZlzn5CzA6VoWU+LNnJAb3M3sfstOVdW+W3eNP1dQtUEKEyRq0Ztj1KzmRUevdouiU/LvzvaB8M+5LCibiPmOhbMafqor9JcWrZW1NkhXTJbHdbPyRp29Nk9fF2obGddri1cIJamk+ajFvi2PVpOT3NBDtM+86OfmaCzRypUrkn7R8SJZtKtFEzIR1n7MqU5a79pZyHRs1fWpamgdmfGWaEPaidjtGl6f5+BDBdVMpxxXFq4V81etbMv13YmXdgt/S8utilRNP8b32BwPGqegrl/N/y4pXQvSeC68yDw4HGJyqaRX5N2u7JcWrQj+aRCs/9fxDJO4utsdopWDOx4gSNmiVW8azFlm1nUS2yXLVHNes1p/riVdl+wf9uqPpLwmCIAiC2Bh/FV0gvianTNxyKCNe5Ykq7llc7Egix
BQ5aPs1RJ3VMfwltglGK3y8r/X5M+SAWMTosGK0clcm5U/mowG2sEXGV1KRsUG6t+FswCRiXqjIeA7nLnJj5WjFvJJxJ06bsxMjxfa5nADAiW0Pg+sxvMRgUsu+nMlffJIpy/CnjUGvjWZ0sqVctr8gXqXYW3Rvev2KtkGIuBkxO04WV26VrQPJ34vEq3QRalnxajHt4vf1ileFtibJFHlUUSYvGK8UAE5sBNrXpb03Q7wqyI9MH7WMeBWGCGX8nM5JfdY+d2MfCRICjczf2WqkFX3IrD1epqy8SKOseKVlY8k6tpDuaJWStDMS9vLulytmhEC0oniVvE+zb8n0Z3IlcTSxTq9zBX5dS0xa1v8tL15Fwoaov/Ij05Uv+u+ob34Yoq6uwiwpXun6wzhF/aRmHcnzA987yF2Bq22DZrnpBn9fuZ1EoqQcG2r5pFX7cx3xapX+Ib/uaPtLgiAIgiA2RqF4VbiqIoXcgaMcpEerLXLFq6ITl34NUZerrILrJpg5gBeGyqBYCFmzr7tywCZidKSQOIJ5ThnxSsZXSqF/F84GTCLmRQoXY6wmXsmvgzt1tC9HcBwHjuOg/3dBXobhLOBsO4ppwTiMf+aBULXsy5n8ibQ3MJKxUcSWggrqpwOMZDqdz00t8Urn3vQ6JPNWxs1YJCd2C4lXaxav1MC3eZMO9f90r0t77xaIV9E2n/0mrCtbts8ROgfx60S5CYHGv6rHVxKs6EOitI2OilZmJfJD93QtLRuXF6+ia5rXwawP6DjT4vtj/7cZ8aqob8n23+K9/GNWv6jh18uIV6X93wrilbyXn7ty9YuprEIWwk5w04pv99+4eKXTT65BvNLx32XEq6JyW4d4pdVOondxdLXHNav256uKV0X9Q17dKeMvCYIgCILYFDPxKvjhwLn1FrfNrVm8EkvS518ixbamtK0KcrCQu2VRrNwwvriwT5Tl4LNBsYuOOrFJCRavh95EK9uWxfzMjOkShphtiTwYLAyUigblqbGrwmWOXp/CtzuoMYbaxUTfvpzJn9gKKdMQlcXnSfy6rG2Dqr2a96bbLQP4p+TtOupA8ncSr7LTHG1dEe/JK5dlrkt773LiVTzelI5dWe+L2lAL9lP+ddEWl+b1BKPjxPYeLR9SJF5N4d85cO78nKDWiWdlrYKN4iVK4UXPxhXEq2gL8vEIk+tmXNjL8/F3XfBZea1ZvNLsWzL9t9x6Fv8QMvdbWn5dpiEex2kx75bzf6uIV7INHQzg3rSUd8tV0p/GcM8SdXzD4pVeP6lZR3Lao9hynBeHT9cGzXLTFa9WbicJ27R80qr9uY54tUr/kFd3yvhLgiAIgiA2xUy8EgOR5ClO8yC0+cvv42QOHKMgn+rg495CNS1Y+q9EkNkMxp+q4mS72NZGMYAxDuuoxsQWuR3HTBnYPPo5R2vrTbREoGKeMoCbwvfjQd15SnytqT8fGAmRbzG+lJhIZ5dHcnVT9H77fZ54JYXChQHfBP3/m9utZV+UV+/thH2JE8Fk3s1PwpN5YLeKxSvNe8Mw4ySiSzM1b8MwgJ8ZhPVPEK8yRFEpPmxavGrZiXqTiPMj6nxtIfD79LaLqiJC6163LvEq8lHJep9W35LtKvl895yB8W5CBJqgf5BMhxRoTBOmapO2DyneNliG4KaV0faj8pinUc/GVcSruX80zcXTZUfHTDlAImIK91yNGaTbForLVKDXt0Sx6ZKiSfKjTtJv6fl1mbaFNMi0Sd+7nP9bTbwSbaiG2l5cMAluWmAHddTfJoSPDYtXpfKzUOCU8c4Wgt2nlMcKNmiVm654pd1OohMX+5ik3R8J2prjmtX6c72YV8v3D3p1hyAIgiCIl2O+bTDa6vGuC+chQBD4mFyLgO3qgMS/bsE4tGKnjSWJBuidb3L5t2NjeB4dw5w8vTCAfSKCYHbtCfwggP9jhM4+B9vr5r4nDMPZBDi5KkAMYBSxRDK9E6feVY77cB9EYGHPsVDfYTA+Z
R2PrDvR8jE64mDcQOda2BL4Ywz/McD5XBj0vzXEEdJnI0x8kdfjqzYMzudBmqdjcerPXguDWw9B4MG9bKHGWP7A6t4Sq6XeD+A+BPB/OBi8F6ei5a28iian5kcHXhCI931pJYRFHftkXjGOxoUrnuWPMXxfA2M1dO+iCcoYlmqfP4Ezs69o26DevWEYrQKpo3/vI3iUQaSjvN1pon/rIQgCBA8OrMMK2L4VO1K+fB2I/75d4tVcaIjKxv9hC7ufQbxivDHLb/9+iNYeA9vrKvmdKNcggHfbR3MneRqp5nXrEq9m9T7yUT4mdhcmX6xvcRafP29nNiZ+AO9+hO47ecJrIh0ipgxLnJ6q60OKV16NPzfRvMg5Ej5GdCJeDa3LqF1P4Fy2RR+h+Fk9G1cTr2YiU+LEujQfGAQe3IsmKoyjcaUIJ1ptobhMy/Qt0WlrbK+D0Q9fXiPqEj8axUVO1V9r+nWRBqVe+BPYH01w1fcu5f9S7JYrZqqnNrzAXwy4HUN+vEieSipXGC6sbEyrB9EJvJ/Hou9RApSXFiC08lN/dV6yzgf+BKMzQ+S7GiNpFRt0yk1bvNJtJ9Fpgwy1D5Gv8eB8NMEZjwlOOuOa1fpzuarqbRv2QwD/URHLY2ObZfuHvLpT1l8SBEEQBLEJ4jGvHmy0TTl4ZwyMVWCejpRjhENMLmoZX//mzAboMzh295voXrnK8cgqAdxeHRX1+oYFV+soYnHCUhRDa4YcFKdNgoJ/h2jv83n6+C4aF+OVV16FYYjwtw8nZgtDxWxjlDhdx5eTmlkadky0rxOrzB7sWDorZhv2107hV0HvRkwo1TK0L+SKrYec+67bMNU0cQPtq0l8sFZoX7Tyqi9ELfVZN8X2jW76MNh860v2Kr7ie0UZjdCMbIpNvCbx9DGO3UYf48zjxf8M8SoMPdgxuyuoX4zQP3yGlVef4+2O77dhJ+vjU7JcKjBPh5gkJ8Y6161NvAoRPo3RP5z7Rv6mgX6v/MqrMJxifKG0H76LxoWD4XsGxjpxkSpazZAhpOT7kCLxSq7G2UuuqMgjgNuTHyHU9PfchO/UsXFF8SqcwvnAU4W9MAwx/ZHw8Tvmoi/Tags6ZTqnqG+J2qn1NV52lcM+xkrdTV0xqunXg+8WGm94vK7eJVY2l/Z/GXW5N39GkWDinvOUradyBU9GDLF4PfAxOo7aYH6Acp3VM8X5WW5rafA9Xqb8TQPW90S+r2pDUbmVEa+02kl04qKFYazfr6CeIuQUjWtW7c+nd9a8zPJiEi7VP+SlcRl/SRAEQRDEukkP2B4ddZ4qNG0ScXx1kBtTaY08pR3pvmZbClaOTYMAQZAfJyL1GGnN9y9ThjppyrYvMbCfFufxcvaVuTcnL34/c53bFjTKZW0kJgla9SvjKPulr1sXT0raS04UU9Odd83jCM3kSpUU9NrreilVhhtJgxA9olNltcori020hYy+JTkxLu/79Ot7mTJayf/p5PEaWaW/WCU/S6VvI336mstNqwwTAp6mv13WJ2nlSxm/8tz9A0EQBEEQG6X4tEGCKEVRwGPiP8cqIs9WMIV70YcbW5USwD7l8WDha8b7YmYHSf+v83MAU0PY2zYong7xulhv7DyCIAiCIIhVIPGKWDMkXhEJXr145YmtPnwXjVMLVq+DllkBYxU0v5U9ubSIAO6lhc6JKWLPfNMIhv0fIrgdwDprwdyJx4h6LZB4RbwuSLwiCIIgCGJ7IPGKWDMe7J4FKxnfivjv8tOG1bNg58TJew34t0P0T5swDuponQ3g/NrEVikf9j8GjIMmutfJGE2Ef9OGcWCgeT5ajIP2CghuB7B6g8QqPoLYVoSYbl0m49oRBEEQBEE8PyReEQRBEARBEARBEARBEFsLiVcEQRAEQRAEQRAEQRDE1kLiFUEQBEEQBEEQBEEQBLG1/D/0oOiYEun3sgAAAABJRU5ErkJggg=="
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "不需要。想象一下，给上一层乘上 -1，然后再使用最大池化，则取出来的就是最小值的相反数（即 $\\max(-X) = -\\min(X)$），再取一次相反数就得到最小值；又因为最终的 loss function 是取平方的，正负不影响最终结果，所以这样的效果类似于最小池化"
   ]
  },
  {
   "attachments": {
    "%E5%9B%BE%E7%89%87.png": {
     "image/png": "iVBORw0KGgoAAAANSUhEUgAABLEAAAAbCAYAAACDdx3PAAAfBElEQVR4nO1dv2sbS9d+/5T5J6YUbswtosbqsrgQpBC4EBiCeMEocBGGIFwEETBLwIgLQRgun0xAAcMWAcFHkIsgFxe5CDIEtjBsYVARUPO8xcyudmZndmdXK1n2neKB4Gh3Zs7vOXvmzH+WyyUsLCwsLCwsLCwsLCwsLCwsLCx2Gf956glYWFhYWFhYWFhYWFhYWFhYWFhkwSaxLCwsLCwsLCwsLCwsLCwsLCx2HjaJZWFhYWFhYWFhYWFhYWFhYWGx87BJLAsLCwsLCwsLCwsLCwsLCwuLnYdNYllYWFhYWFhYWFhsBQGCh6eeg4VFWVhYebawsNg6nnESK8Dk0oV7OUHw5HPZYfz04J678H7uwFwsMhHcDOCeDzAJnn4uFv9iWLvx8hFMMDh3MbgJnn4uAubwzl241/MdmMsLh7EMvHyebM33Ps4wOKaoX85h49gsJOkzv3bhnnuY533XM/RpwT8j9M/d3Z/3o4cOraH7bRd8yUvTqZe2Hrv+ncHOxoDmSE1iBT89DM7aaBw66Fz7+Qf40Ydz6KD/YxOT9zE8IiBHQ/g7QMidxU0PhBD0bnZgLs8Q0wsHdK+LyWI74/lXDRDSwPDX06/d4l+M52I3Hjy09ygaf7/czfXG8GuIBiFoXIW+PYD3rgLaGObfIJaKCXqEgHyYPD2NXjoSMvDyeGLqw7fjewN4JxT0ONQxwzj2X2vnkvSZfCAgpIdJ3nc9F5/GEVy3QQkB/cOBc9iB5z/9nNKwuOmhRuoYqJJtwRSDkwZqewSEVFB728PobqF+1+8J3EMHjc+z5Bjfe3AOWxgmxvAxOnHgnHoIdnZvWNS/lr2eXfHzT7V+iwjG/t8cc6+H1ut9brvqaF9sNvmoSWLN4f3pgBKCykELnaKZutKchg/vTwfOn15MiK1gJ6BKGj4zx/10UMnYEtO/GnCOXEyfaRJreuHAOexj+uT0tXhWeC5248FD59BB+2t5TvhfA1US69SBczLank/1Gf/Ej2QlJ0yUY1ioZUCHZ5zEMvTh20hiLb51QGkbXnT0yjyJVdTOPe8YoGAS69nHwnzd9YGYaNhxWzb9VEvOeTFF74CA7DXQuRxh/LWP1msKQpsYKXWNr12RZJl8oCAqe7UYo0MIqp+mSpnZDRT1r+brMdN1xTyeSq6MCl12lZ8vACUnsfwvTVBC4fzXxdDzMPzQQIUQ1D5OsdjQGpRJrOmnGgipoeutubASk1hJIbaCbUTvZ+W4nxK7IU9lB9KFv1pa/Lth7cbLxwa+wpUzh5ITJruwzl3FvyCJZYrNJ7GmcA8IqufT2N82H3c87xigYBLr2cfCGn3bdVv2MEKLUHTHqyqr4LoNQhz072K/+zlAnRBtZeH8swNC2vCEo71TuK8IKKUgJ55Y3fGjB0oIut/VMvO8Yb6ewrr+VHJlpJMvjZ87hFL5zvSTvPNiCasFxu8pCOluzP8kk1gPI7RSjEsu6ATUn2B4wc55uxdDTH5pykqX4fn3LpqvCMirJrpRVVhMsH/7mFz12fvOBxirym6DObzL2JjCb2Jnbh/YGVHx/G2AuTfg7+9jeGPG8OCnx96VNabJ/H8HmEVzcDHw5qv58XOt7kkdhBDUT2I9A2I8iM+n/3WmLvGL8+bSwzzuRKKeAgvMvvazz+mnzXm5RNhnY3ATSOOO4f9WvTONDxk8jPNfelYvY/r+C4tfkgxLPIv31/BvhlFfg8E4XXaiQPpeXOvoH00lpIZfbHwX7ToBIXW0w34m/hj9cxfenfie+bUL90rKlnM6jv6J6WeqHpnpS1HaRDSKPSPK6ALTv124n5Plq2zMIaaP6vckZDNN1gV9Vcl1Uv8H3jySy4TOGNE0Dx1WuuVez6X3jzBT9HxJzPV7VnARe79OBuP25mqi1Om0NfjjPtzzPsb38WcWmF4xOzl5WCrO9K83L
11fHNEOlLP2wnbwYRazZQN4PzW2Iet3ikosufdELl2Ny9mlh3nA36fso8Rp+L6JKiGoHndjdI9t4HLKrqg/aWOIWPwzYjp+J6+JyVc/vt4sump67xj3XJLjo7zxgCyjivVE+qILYhO8NE1ixW2/3jaW6j8F3z6AdyfyQ+fDV+9lPmquS2KVFRPduqgSRzpuZRjHptk5jY5oY4DlEsu7AVqHDbg/dLF3GTGxmSyk+7GcSaxtxcKmuqvc2yyENat9UBt1QkDqbfau6/9PsWU74os5z70TAvJ2tKLpIkAQBFIlBrMn9ONUTcebHiih6MUrdH4O4JAqeh86IKSDcayycv53I5b0MtEpH+O/1LEisxcjzBSVm5FdetDIvzSe6Bs0vZ3iz3BbOL+O/y57Pam6nqbbOXzk1vasCazWPzeIB4ru1xM6qarcNY29ZJoZxuHl7ZlL9P9pe7gHvj4uM4sgQPAorontaZsYbeg4dCKJxQZkBmJxP8V4PMbkrmDTL0USi52bDo8pdtA6qICQClpf1ArHyiMd7FMCQvfhRCWPXLAPm2geUOy/aaNzUue/E8tUFzc9OJSA/tFE57yD5h8UhDro3YQOhr/rVQ01Pk7tv7zU8nGC3msKQvfRPHXRae6zcrmPk5TyuAUmHx1QQrHf7MANxyS15JgG84/PoX7SRfekjgohoG9ctinnpaDOQQWEEFQOYiWlnAeN4yboXg2t05DmyRK/+ZcWKoSgUm+je9ZGfY+A7LUwuhf5WTuo8XFqcHWb3Kw5L5eINisHNdToPpy3bbSPaqgQAnLQw+RR/T41H/Q8FPkfPktQ+8Ce1cuYOmCK6MRlOORtO2YwmB45aB7X2Lng0zbqf1AQQtH8ojeq7LkqnNeV5HNXoo6k8cu/7sA5dFY9CA4dOBdTRnMall2vDN7gkIDQrhAYsC9oq6A+W4/M9KUobZbLOUZvmb2on3ThhmumTQzv4+9uYSTclMMDq6jMXXxPKJuVt6OVE9XJ+v0IrT0CsldH+8xdyfWxWPo+v2quzoTz9VVeO6gq7GEmTQvQIdKtoyaatILa2w46b1e6FT9ao5zrXiUjiRXqrgNnbx/1k46gu8PPzN40Ttqr8/HH8TJ6gzU8TtB9RUDjX11vXdQIQS2U34QTXm9eumoM0Q6su3YdLbPt4Epe6mifddGuV0AIRf2TaMuNfpegXXLDaKyr90M0uf2sn3TQfrMPuufAeaVLfEzRP3TgRL0T4n1fTGU3y8+mjSGBf7wTq2RCG1iFe5uDrpqPdyaVPvPrtiI+qqEdr4rP41uzKjlUQaySlxVG/7Qk1uMU7htxXvuUgL7pi/amTP/J5xrx403S3yR9+AKTD6Fdb6Fz2kJtj6KyRxP8KTMmmn6qgtCedNTHMA7U2bkUHdHHAEssxl1QQtH2dLH9mjGxkSyY+LGcSaxtxcIKmO1tVmuuve3APW0yXh+04YUxzHUHziHn5V4NzqED5+L/su3lU/vikHZCQkmDuz4cQtC+1shf4KFNCJzPkk2gPUwePLQJjdlXHt8dDsQ+cxk6NT2vKmJFXk0iV3oJdonF56HtcEK79HmI3gHF/usW2lH/rxp6P6Q9X7yi6HHCjlqSmA5TB85B/HfZ60nTda1uHw3h5/GR29qzam2RA2cvOe7wXj1urv36h1pMJ1uMjpK+m8ZeejtaQe2ojfZbJ3Pu6++Zi/v/rP1yXA9qB0z29w9amqPBC3jvCMgrd2PH2RNJrMkZATlso3PEhCuESEhDJII5VblZAO+UJjbQeqWT/kZq6H2PGcJ7xpTI+C3G6FKC6tk4ZpTmGB5TkIOQsKt3iUcoeSncqy7GMYPMNn21KLBNQFkqO8egrjBMWfNfLjA+pUmB58FbNSZYaSXU9HiI+e/VuiZnVfFrRjjny9icuYGNHFf0roEmA77MOWeuaBJ9w2BgtaEw4YOOhyu6C0mGz3UpYFWXrCYCJk6nuEIvlwE3gqv3MSVnvxPk7ijubJOIP5fQk
bjDNeGXav7LBcanROw38HMAhxAQITDgtiCkh5EemelLUdqwwLuK7vd40myMDiWgobHmm9HW15hO8YAoNNTzS8b7geSgaoSugiqlrK/WJyQWvnXYV8OQdnwO9GQkfLkON07R74xoWpAOoW5JOrj43kWVEHS+LYS5inNYJOeagEZ3v3VA+d/jNGKBbSwZYLSGcL6Uz5fLSJw2us1dwXnlSmIVHMOYlrIdDOkjJ0z/boKS6irxafo74yRWlq4GGL0lILQtbAiC72z+qYmPtOOEWbJr5Gd1Y8jgdlEItqS/mdK1aBKL66Jo+7mvjuKjnL41dxDLeamRxTReTs+rynk1COE38S1L9p/h5rWPWWwes7/qILEG0wkfeOuiKvvO0O7G+VNqTORjdExAEslswzhQZ+eydES1fiOsFxObyIKZDyj/OGEpsXACZnsbFnfWxI9UAU9kCLFnnuOEO+KLBTpLVVRxhImblBhnuZxj2Ignk7iun46x4DKx+hA7lT7MGuoUj3uFWJH/TZdcC+2SbDsGdbZPFnwRT/yt1pCUZZZIq8O9XQjzbNIie0VTXVfsdYx85Jb2rGnjCnrjoUNJrOqv4H6d+4O2l3w3fT9mczX1/dq5i/uNiMfxuZe6Zy7q/832y5F/fu+lnjJgtKf6ZHUJkJJYIcH55LjhDX64qJMYQ02hSGL1qGzol5py0wyl0zb/E5nHvqQqbsz43gWJSrs17+Kb3/qlXCU2QVcyHgJ+L9h6JOaKBsZs/kt/hKZyDqEBjDnfFMfNzoqvIAfU7CthMpHIHB4fQ/OuBIznrDuiEAbS3MkZ8UHfDFJV4piklVkSa/pRk3DlG5BQJlbHAtNkIAntRkdyrkb80oy3+NYRvpT5Vw2QowEGp/GghQVl4XqM9MhQX4rSRm0nZL5x2YmVs7P1hglA3lchYct40HTK/66R9UUQIAgkhyU5Av9LM2Zb9DJnZpuK0oHr1lm6EzOdaxLpTjJhG3mgsErgmaxhici5H/TgXbWTwYhuc1dwXrmSWEXXbkpLyQ4yXqluf+IbKC67pr8zT2Jl6Cq390l/aHAELW1TliG7Zn5WN0YS4WYuki/eLDiyW6Z0LZjE0v7/PwM0Duvo3y7X961ZQWxhXrIKX6qQYe99eGtY2f6TJ4akJJYsF0ofLh1HUtG/1JgojH0T9DGMA3V2LktHVPpghHViYjNZMPMB5SexSomFdfxN3duw36j2UcHXluSHCySxntoXS+MqN65RAmtVeabD9FMVUS8dbovD5Oz8s7NKZvMkyipxa6hT4YY9FiuyXlw6HmfFB/JzPPGmlWUuM6eyPGh0IHM9W0pibXrPajJfgV98L1N0v877qQlJrKUY6xv7/hxzZ7Tgcy97z7xOJZbBflnnnwXe8ARWeqXa+lAnseSgQCV8JlAIqFBKfjHE+C4teZUmCDrhEJnHlJqfEY6DnwFmc9O8i8+fndeNg/dPSu0Pwc/lnrZYOXBU6iklsTLmn6rk37viF/4czSxFYxyWPLJ+UMJaT2IZWNPGmMZz1gfGgjMx4kN68z/WE6CDFudFWMaaL4mVNob45chsM5yEfqPDDLFw+0oWv3TjcWPPnD57V+PKZ0mV0AjeD9GIyZaRHhnqS1HaLJfLqH9A96QR6dU+FXnCkkNhko5XU4SBCjfa7Oy/OM92vIIjTYb9CYYXXbSPuCzxUuzQEWjXIb3TzDYVpYOZEzOdaxI5G8/yvwvvM+DlcrmMvoIRkjy6kDeJlTWvMpJYRms3oaVkB9P0Y3K2CmBMf5criZVGD62srJnEMqJvlp9N4YuMsDIy/Jp50wONbSyN6VowiVV4cx7CxLdmBbFFeZlnE1Si/xSPeAwwup2bfTxUzGGjMZGWfoZx4Bp2bq0kVpGYOE+z4EwfsPnG7oX4rhgzc2+TRpd7OfFTXhJra744c528UkauNtGAfXzk9vemBxo/onjrosp5weK9eM8dQ51ahlX54QdO/pw2IZE/dp18SJFlLZ2yE
7m69WwlibXpPavxuNL7Cu/XxSO+/asxZg9iQsvY9+eYuyzfpe6Z12knsMzeL2dWlSsrrjcDdRJLKyw5b27RMGbxa4LheRsNThiy10D/Nq0HzLpJLH5GWAF2tWe6w2bndRXQnjnmpenhlbLjMcbjMfr/LTmJJf/fuo6b94NKgp+RLiOJJfyfXiGFZnBGfEjJ1l81QUkFjdMBRpwX479aJSexxP/bRBJrRStDfmnHY8/TDxP+Bbweq6RixjK4brP+A8K8M/TIUF8KJ7HCkv7XLbhXHterEbqHEk/4ZrR9HURf8KLgkBttdvZfgT+91CRWeOyQXSHLZelLF07hJFaWbSpKhx1PYpnyMq5b8pEH5XgvK4kVt4OpgVTs/0x/9/yTWCZ+NoUvSvrQWDKQCtfFG9P1qZJYJr51Z5NYa/jPhxm8y07U74dQB51r3RGbnEmssmKiF5bESvXxpvpm5AOeKIllEFupkLq3SaPLWn5sR3xxpi3glUdy9aAO/GNr84vPqrLk45Y8xpt+pFIbCvOkTzjX1teA/1u8WVGGTWJtYc9qPK70fOH9+hLhZQtu1MuMoHLUj1ooGfv+3HPnx27L3jOvkcQy2S9nVpV/aSKrUqssSEksXrWg6EvDsoblJLEEPEwxOKYgNK0RYPEkFitJzaog07yLl93G+wuYQHmlbELYDQ1CyhwiQQl5Uthxy40R1+BnrjnrFZKV/HNaGfFBQ8+wQeRfs4y1mCSx0ugkHhcoPYkl9HUy5FfKeGE59uS6negxUv00xeRMLHs30iNDfSlKGzaHNjyh1FVT9v6eXcPsj7ug8XkrGoaay7qq78Uy4QjU10Mn32lmm4rSwcyJacvnN5zEMuclsxmU1NE8rib6e7z0JFbcDurlhR+r4nQz/V1pSSxtH5HNJrHM/GwKX1S4dVElVbg/mK7Hy/uN6VowiSUcn0md3xq+NSuI5bxMvj+Dl6F/Sr3VevP+c7nw4b2vgZAa+v+ontHHuBuNicLLU557EsvExxvJgqkP2HYSyzy2yoS8t0mLPfiRptXctpfEKs0Xh0gc7wsRYDYeY2x8UVioMwMMDuWjVlyPz4arD7J5dYrPKWw/MfvayujNXHISi39grSYq2p55EmvdPavxuNLlUwX36yoEtwN2Ycg7fgze1PfnmLtAi7L3zEX9v+F+OSuWWfyaYjyeqm94LBmJxu7ybTwMYRloTkcoCyg31DKjFl47lSDrJLHCW1iSwrGA76fcGrFcRhUdVFFeuvD9jJJT+UpJ3qU/bxIr7OGTaCjJn499KS7uuMOyWtWZ3wD+Q/q7kjCdM1+rnBSQbwgx4oOGnlxJ5VvvmMzl74nFehckewEsbnqoxr7irJfESjYKZOOu9NKIX2nj3bqokhpqB2JQFVy3QQ4baLwS9dRIjwz1pShtJh+I4nanGfqqL4Y3PVDSQPOYSj0oeB+EuiJIffBX8qqU9Yn6Wmh+007kCG7dWANUvcyZ2aaidDB0Yrq5futsNIllzMtfIzQpvz2KH2WonsVKlEtOYgkl3jF+jE+3kMTKsoOcV4lk0S+pebbp78pKYoX2Xtb7RxYQbSqJZeZnU/iiBFtLtV5HVbavpnTVJIJYwjglzrlh18rLlQBiry5T38o3gDJP+Nz0X2I1vOQbLj0vFfFIOI/6SoZL9Z+6/l13fdRkW5Pw4UnfKfOn3JhI7o8j0e25JLHyxGIZsmDmA7adxDKPrQQY7W14wiTR0HyByQe5D9f2klil+eIQWT09c2D6kYLsVVBRvI+1v6iyptyCTc6TxFrZg3o9u+9zqUmsUB4SepKtA7r17EQSa909q27cVz1MpJ5jQh+ogvt1dY9EHkeEazX1/bq5J1o0yT2sSt4zF/X/hvtlk5uWt4VEEityNAdtDG99BMEck8s2aoSieRW77vRrG86Rq87EawV0Cvcg/u4A/p2HXl1l2EWGeycE5FUH3n0A/2HVyydboXyMjtm19d2vM/hBgMCfYvinA0qls9Cab
CklFM7ZCDM/QBD4mF514FAq3moRR3j17LsBJvcB/LsxBu/YbV/5k1jMWFNCUf/osTn4M4zOHFD5tobw1p2/pmydCxUPNEK4mLKGi3st9G/mCIIAwf0Y7lEF5LXLrkU2DthM58zXSiiaFxPMQ968Y19SV1fTmvBBR8+VzA1u5gj8GcaX7BpzcS0qGVM5Bel9QYD5TR+tPfHmivWSWASE1tHzZvADHzOvhzqVrkU24dcy/ILQQP/WR/AQ79HAjZ6csOYNdpOVRCZ6ZKYvRWkjy9T8doTeG8ropeL7KyZbiSOBP/hV2G/7mNyzpqbzsYvGHoETNiFUynoA74TGeDPH9CvjDZGSAYxWSR7KiVMTmhajg2lgazpXGesFzmZr4HOLffFlz8WOMpScxIpuljzmsuHP4J032BXGm05iZdpBWf4C+HcjdF9T6Xplw9+VlsRa6X3IT//OW/EzLYnFv/xVTz3MA583FDVPwGb7Wd0YerBkhsqmmNKf256DLkZ3YQzVgUNJ5se60TEVbPr8ZoD2gWj7TeMB1se0FtmW+c0AnddUtFUKmS3KS2ZXY7bfn8H7WGfzimS4TP8Z48c49IETDE6qrPrlQeNXQt8ZzSGMcSX+lBwTsTXJm6XNJ7GUMcDdAK3DBtwfuo8l68XEJrJg6gMKJbE2HQsnxjTc20g2KwjmmFy0UJH2V1r+rmMvpb+X7ovj9lNZ8ODDO20JR32zEH5MU/Yc4utIHlvKl8SKEvSKWFGtw2UlsVaxaCQP95OVDyuQxNLH+xn0MfKRW9qz6sYlBPRND96dH7MnVEi4rLdfH2LKn5l5PdbTKaqSM/X9+rnX3odzmmOsmHvZe+Zi/t9sv5xZiXXbR+vt6jjmJqFIYi2xfJyi3+T9BQgBoftoXc4EhZhd1DRfK2JQOY1AejchqBy5mOi+cETK7vIgMFSWHIbqt49xtBHhY9Y7GEWGL70puM83uOGzZK+Oztd0Qzy/7qzmSyqon47gXTgxg5vza8F3cQ70jybc77JS+rxBXSwYy3O0IZixDXREJ4r9Zh/TMJmRI2Azm3OYVe6L40o9Lcz4kMLDey9S3oj317x6JlbynpQxjXN6lOlUQf10iNljBn1170vwpY3htz6af8TmfKQwCFn8Wi6x/DVCK6RZosqMKoID/oVARcdMPTLTl+KN3ReYXsTGp/toXowxfEegOoYzPa9qGy4G/wwFmWDvmmZUYi2ZbTyqCDLdHw9ZxUm814Pqd+dy9Z85TfPTIUdgq5rr136xI3DGiZzsNYS3m4hft8OvcjyYLjuJtZRtN9O90V+NzSexjOxggIkgLxT7TZX/NPhdiUksNT/d7Eqs5QLT89Wac/VzSvBK5Wd1Y6SANyVVf5U2o//iti/6vtcdeH8b9BV9nGF4Wo+9n61pLjUrN4oHJL1m8jRENzWI1fFyhP5RFi/ZvOJ+i/7RRP+HPK8S/efvOUYCvRith3cL/TPLpTImUPKnzJjo5wD1xEZ580ksVQwQVgG3Pd3RrvVj4mxZMPFjBZNYm46FVTDc2yzupNhjr47O1UxKOOiODa1nL+N/L90Xx/6mrGgKK2XeydUmKQh7mCrtDq82ScR4OZNYOU4alZ3EWi5VPsxFt5H9nLrHlz7eT6ePiY/c0p5VN+7JEOOLJrtwgNOqcZG8+a7Ifj34Ie63CKmgcT6R5NQ09lLMveFiKDyrnnupe+ai/t9gv5yVxGLVrKsj/ZuEOokVQnn1akng1yCrM6/pzxSfDx+zYHYwfuVmnvHk23LWgckclFdkbpo3heYsGT9DecvPhxx0ySNjmivey0SeOev5VfY8zfSoKJ+M6V7Gux4L2rhHw7XFf5e64Slgm8qkQ5417SovS0W5NjAdReyg6fy2uQ6ZnwY9sUKsJXuG9s10jFsXVaKvhMxD18K+2NC3GNnYorRdQzeN5lWq/ywW2xnzp5SYiH3NVx132Tw2E6sYx6Npv9mgD3iSWNj0mXVs3iZ8dRl8+DlAXXFce
LfBPtxGt9I+CeL6mZ5AzveunNiAXJW/Zy3RNyrXX1bspeDl2n69QKxYkKdr284tIT2JZWGxUeTY5FhYPBssMLnoYyJ8tQ3gndLMxqEW/0Y8fzu4uOmjL31lDbyOssfTbmOB8SkFeTsyrxawsDDFs9zkW1iYgNnOp0nSroGfA9QTfaC3hPsR3KtkT786SeuzZPF8sG5CUsbzjxXLhk1iWTwhrEJavETMWYk03Ufz1IV73kW7XgEhFbS+2MDEQsbzt4PzL6yvy36zA/fcRfeEHfGqvB2tf8vXVjCHd+6i09yXejhZWJSL+VUTNLWPioXF88Nzk+vgZgD3rI36ntRvdotY3PTgUIJKvY3uuQv3lB2Xo6+fDx0t0mCTWJuGTWJZPCHYxsHN0ejRwuK5wL8Zon/agnPYQPtsgPEvuzG2UOGF2EF/guFFB61DB42TLgZj/Y1Au4cp+ocOnKN2oqLMwqJcLDC7bKFhKy0sXgoWY3TrXYwzehvvEvzrDpxDB60PI6Ef39YRzOFddtE+cuC87aD/dYpggy1KLLaJAJNLF+6l3F+rKF5IrFgibBLLwsLCwsLCwsLCwsLCwsLCwmLnYZNYFhYWFhYWFhYWFhYWFhYWFhY7D5vEsrCwsLCwsLCwsLCwsLCwsLDYefwPmJ7t+GmAIw0AAAAASUVORK5CYII="
    }
   },
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![%E5%9B%BE%E7%89%87.png](attachment:%E5%9B%BE%E7%89%87.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "softmax 池化可能不受欢迎的原因：它需要对窗口内每个元素做指数运算并求和，再进行除法归一化：\n",
    "- 1、计算成本会大幅提高\n",
    "- 2、反向传播时的梯度计算也更加复杂"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
