{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:33:37.719983Z",
     "start_time": "2025-02-14T01:33:37.713094Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:30:37.403370Z",
     "start_time": "2025-02-14T01:30:37.388807Z"
    }
   },
   "cell_type": "code",
   "source": [
    "a = torch.tensor([1, 2, 3])\n",
    "b = torch.randn(3, 3)  # random initialization"
   ],
   "id": "dc4ec185989c7dff",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "描述： 卷积层用于提取输入数据中的局部特征，通常应用于图像数据。它通过滤波器（卷积核）对输入进行卷积运算。\n",
    "\n",
    "输入：形状为 (batch_size, in_channels, height, width)\n",
    "\n",
    "输出：形状为 (batch_size, out_channels, out_height, out_width)\n",
    "\n",
    "batch_size：样本的数量。\n",
    "\n",
    "in_channels：输入图像的通道数（例如 RGB 图像是 3 通道）。\n",
    "\n",
    "height 和 width：输入图像的高度和宽度。\n",
    "\n",
    "out_channels：卷积核的数量，决定了输出特征图的深度。\n",
    "\n",
    "out_height 和 out_width：输出特征图的高度和宽度，由卷积核的大小、步长和填充决定。\n",
    "\n",
    "这里，卷积层的 kernel_size=3，意味着每次卷积操作使用一个 3x3 的卷积核，步长（stride）为 1，填充（padding）为 1，因此输出的图像尺寸保持与输入相同。\n"
   ],
   "id": "c1d7988bed861b3e"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:37:59.063780Z",
     "start_time": "2025-02-14T01:37:59.058462Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Create a batch of single-channel 28x28 inputs\n",
    "input_tensor = torch.randn(8, 1, 28, 28)  # batch_size=8, channels=1, height=28, width=28\n",
    "\n",
    "# Define a conv layer: 1 input channel, 16 output channels, 3x3 kernel\n",
    "conv = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)\n",
    "\n",
    "# With stride=1 and padding=1 the spatial size is preserved\n",
    "output = conv(input_tensor)\n",
    "print(output.shape)  # torch.Size([8, 16, 28, 28])"
   ],
   "id": "ef674dfd49507f5e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 16, 28, 28])\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:39:09.638591Z",
     "start_time": "2025-02-14T01:39:09.617187Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 2x2 max pooling with stride 2 halves the spatial dimensions\n",
    "pool = nn.MaxPool2d(kernel_size=2, stride=2)\n",
    "input_tensor = torch.randn(8, 1, 28, 28)  # batch_size=8, channels=1, height=28, width=28\n",
    "output = pool(input_tensor)\n",
    "print(output.shape)  # torch.Size([8, 1, 14, 14])"
   ],
   "id": "4488d08dae88fc0f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 1, 14, 14])\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:39:18.188881Z",
     "start_time": "2025-02-14T01:39:18.170214Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# BatchNorm2d normalizes each of the num_features channels; shape is unchanged\n",
    "batch_norm = nn.BatchNorm2d(num_features=16)\n",
    "input_tensor = torch.randn(8, 16, 28, 28)  # batch_size=8, channels=16, height=28, width=28\n",
    "output = batch_norm(input_tensor)\n",
    "print(output.shape)  # torch.Size([8, 16, 28, 28])\n"
   ],
   "id": "ad002289423d09be",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 16, 28, 28])\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:39:29.479626Z",
     "start_time": "2025-02-14T01:39:29.466752Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# ReLU is applied elementwise, so the output shape matches the input shape\n",
    "relu = nn.ReLU()\n",
    "input_tensor = torch.randn(8, 16)  # batch_size=8, features=16\n",
    "output = relu(input_tensor)\n",
    "print(output.shape)  # torch.Size([8, 16])\n"
   ],
   "id": "a99f0fbd8abad62a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 16])\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-14T01:39:34.869941Z",
     "start_time": "2025-02-14T01:39:34.850047Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Fully connected layer maps the last dimension: 128 features -> 64 features\n",
    "linear = nn.Linear(in_features=128, out_features=64)\n",
    "input_tensor = torch.randn(8, 128)  # batch_size=8, features=128\n",
    "output = linear(input_tensor)\n",
    "print(output.shape)  # torch.Size([8, 64])\n"
   ],
   "id": "9c82d9c64e087827",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 64])\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": "",
   "id": "b2340dce352f0d1",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "2b49909fd2300168",
   "metadata": {},
   "source": "",
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
