{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-11-15T01:02:39.206372Z",
     "start_time": "2024-11-15T01:02:36.893192Z"
    }
   },
   "source": [
    "#环境准备\n",
    "import numpy as np              # numpy数组库\n",
    "import math                     # 数学运算库\n",
    "import matplotlib.pyplot as plt # 画图库\n",
    " \n",
    "import torch             # torch基础库\n",
    "import torch.nn as nn    # torch神经网络库\n",
    "import torch.nn.functional as F"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-15T02:19:33.260535Z",
     "start_time": "2024-11-15T02:19:33.256395Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#nn.Linear类创建全连接层\n",
    "#nn.Linear(in_features, out_features,bias = True)\n",
    "#in_features: 输入的二维张量的大小, 即输入的[batch_size, size]中的size, 是X的维度, Y = WX + b, X的维度决定的W的维度, 总的参数个数 = in_features + 1\n",
    "#out_features: 输出的二维张量的大小, 即输出的二维张量的形状为[batch_size output_size],决定了全连接层中神经元的个数, 多少个输出, 就需要多个个神经元。\n",
    "#bias: 是否加入偏置项, 即有无b\n",
    "#torch.randn生成服从正态分布的随机数\n",
    "#生成1行5列的张量，每个张量是一个5行1列的张量\n",
    "x_input = torch.randn(1, 3, 3, 1)\n",
    "print(x_input)\n",
    "#全连接层的输入通常是一个二维张量，因此需要降维成二维才能作为输入\n",
    "#通常降维操作是把一个元素的所有特征排列成一个一维数组，即从四维(1, 3, 3, 1)降维成(1, 3 * 3 * 1)\n",
    "x_input = x_input.view(1, 3 * 3 * 1)\n",
    "print(x_input)\n",
    "full_connect_layer = nn.Linear(in_features= 3 * 3 * 1 , out_features=3)\n",
    "\n",
    "print(\"full_connect_layer:\", full_connect_layer)\n",
    "print(\"parameters        :\", full_connect_layer.parameters)\n",
    "\n",
    "y_output = full_connect_layer(x_input) \n",
    "print(\"y_output.shape:\", y_output.shape)\n",
    "print(\"y_output:\", y_output)"
   ],
   "id": "78797a6df662709a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[[-0.0384],\n",
      "          [ 0.4475],\n",
      "          [-0.1220]],\n",
      "\n",
      "         [[ 0.7178],\n",
      "          [ 0.7475],\n",
      "          [-0.0817]],\n",
      "\n",
      "         [[ 0.0891],\n",
      "          [-0.6094],\n",
      "          [ 1.5752]]]])\n",
      "tensor([[-0.0384,  0.4475, -0.1220,  0.7178,  0.7475, -0.0817,  0.0891, -0.6094,\n",
      "          1.5752]])\n",
      "full_connect_layer: Linear(in_features=9, out_features=3, bias=True)\n",
      "parameters        : <bound method Module.parameters of Linear(in_features=9, out_features=3, bias=True)>\n",
      "y_output.shape: torch.Size([1, 3])\n",
      "y_output: tensor([[ 0.0386,  0.3000, -0.0743]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-15T02:19:40.125167Z",
     "start_time": "2024-11-15T02:19:40.119773Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#nn.functional 激活函数、损失函数\n",
    "'''\n",
    "nn.xxx 是指 torch.nn 模块中的类，例如 nn.Linear, nn.Conv2d, nn.ReLU 等。这些类继承自 nn.Module，是PyTorch中定义神经网络层的标准方式。\n",
    "优点：\n",
    "状态管理：nn.xxx 类通常包含可学习的参数（如权重和偏置），这些参数会自动注册到 nn.Module 中，并在调用 model.parameters() 或 model.named_parameters() 时返回。\n",
    "方便的初始化：nn.xxx 类通常有内置的参数初始化方法，可以通过构造函数进行配置。\n",
    "模块化：nn.xxx 类可以方便地嵌入到更复杂的模型中，通过 nn.Sequential 或自定义的 nn.Module 子类进行组合。\n",
    "自动微分：nn.xxx 类会自动处理前向传播和反向传播，无需手动管理梯度。\n",
    "缺点：\n",
    "灵活性较低：nn.xxx 类的行为通常是固定的，难以在运行时动态改变。\n",
    "2. nn.functional.xxx\n",
    "nn.functional.xxx 是指 torch.nn.functional 模块中的函数，例如 F.linear, F.conv2d, F.relu 等。这些函数通常是无状态的，不包含可学习的参数\n",
    "优点：\n",
    "灵活性高：nn.functional.xxx 函数可以在运行时动态地改变行为，例如在不同的输入上应用不同的激活函数。\n",
    "无状态：nn.functional.xxx 函数不包含可学习的参数，因此不会自动注册到 nn.Module 中。这使得它们更适合用于不需要学习参数的操作，如激活函数、池化操作等。\n",
    "轻量级：由于没有状态管理，nn.functional.xxx 函数通常比 nn.xxx 类更轻量级。\n",
    "缺点：\n",
    "手动管理参数：如果操作涉及可学习的参数（如线性层或卷积层），需要手动定义和初始化这些参数，并在前向传播中显式传递。\n",
    "复杂性增加：使用 nn.functional.xxx 函数时，需要手动管理前向传播和反向传播的细节，增加了代码的复杂性。\n",
    "使用场景\n",
    "nn.xxx：适用于大多数神经网络层的定义，特别是那些包含可学习参数的层（如线性层、卷积层、循环层等）。\n",
    "nn.functional.xxx：适用于无状态的操作（如激活函数、池化操作、损失函数等），或者在需要高度灵活性的情况下（如动态计算图、自定义层等）。\n",
    "'''\n",
    "print(y_output)\n",
    "sig_out = nn.functional.sigmoid(y_output)\n",
    "print('sigmod_shape',sig_out.shape)\n",
    "print('sigmod_out',sig_out)\n",
    "\n",
    "rl_out = nn.functional.relu(y_output)\n",
    "print('ReLU_shape',rl_out.shape)\n",
    "print('ReLU_out',rl_out)"
   ],
   "id": "3691fe97c98b0b98",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.0386,  0.3000, -0.0743]], grad_fn=<AddmmBackward0>)\n",
      "sigmod_shape torch.Size([1, 3])\n",
      "sigmod_out tensor([[0.5097, 0.5744, 0.4814]], grad_fn=<SigmoidBackward0>)\n",
      "ReLU_shape torch.Size([1, 3])\n",
      "ReLU_out tensor([[0.0386, 0.3000, 0.0000]], grad_fn=<ReluBackward0>)\n"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-15T02:21:17.930240Z",
     "start_time": "2024-11-15T02:21:17.918831Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#nn.Parameter 参数类\n",
    "#创建一个model时，nn会自动创建相应的参数parameter，并会自动累加到模型的Parameter 成员列表中\n",
    "\n",
    "Weights1 = nn.Parameter(torch.rand(3*3*1))\n",
    "print(\"Weights.shape:\", Weights1.shape)\n",
    "print(\"Weights      :\", Weights1)\n",
    "\n",
    "Bias1 = nn.Parameter(torch.rand(1))\n",
    "print(\"Bias.shape:\", Bias1.shape)\n",
    "print(\"Bias      :\", Bias1)\n",
    "print(\"\\nfull_connect_layer\")\n",
    "full_connect_layer = nn.functional.linear(x_input, Weights1)\n",
    "print(full_connect_layer)"
   ],
   "id": "497fc58248ee52f7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Weights.shape: torch.Size([9])\n",
      "Weights      : Parameter containing:\n",
      "tensor([0.1458, 0.5597, 0.8853, 0.8888, 0.2166, 0.0884, 0.4524, 0.8860, 0.5404],\n",
      "       requires_grad=True)\n",
      "Bias.shape: torch.Size([1])\n",
      "Bias      : Parameter containing:\n",
      "tensor([0.1916], requires_grad=True)\n",
      "\n",
      "full_connect_layer\n",
      "tensor([1.2810], grad_fn=<MvBackward0>)\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-15T02:36:22.986701Z",
     "start_time": "2024-11-15T02:36:22.982354Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#nn.Module类，是一个抽象概念，既可以表示神经网络中的某个层（layer），也可以表示一个包含很多层的神经网络\n",
    "#nn.Sequential类创建神经网络\n",
    "#生成一个神经网络，包括：线性层（全连接层），输入维度为 28*28，输出维度为32->ReLU激活函数层 ->线性层（全连接层），输入维度为 32，输出维度为10 ->Softmax层，作用是将前一层的输出转换为概率分布\n",
    "'''\n",
    "输入层：输入数据的维度为 (batch_size, 5*5)。\n",
    "第一层：线性层 nn.Linear(3*3, 5)，输出维度为 (batch_size, 5)。\n",
    "第二层：ReLU 激活函数层 nn.ReLU()，输出维度为 (batch_size, 5)。\n",
    "第三层：线性层 nn.Linear(5, 3)，输出维度为 (batch_size, 3)。\n",
    "输出层：Softmax 层 nn.Softmax(dim=1)，输出维度为 (batch_size, 3)，表示每个类别的概率分布。\n",
    "'''\n",
    "model_c = nn.Sequential(nn.Linear(3*3, 5), nn.ReLU(), nn.Linear(5, 3), nn.Softmax(dim=1))\n",
    "print(model_c)\n",
    "\n",
    "'''\n",
    "print(\"\\n显示网络模型参数\")\n",
    "print(model_c.parameters)\n",
    "'''\n",
    "\n",
    "print(\"\\n定义神经网络样本输入\")\n",
    "x_input = torch.randn(2,3, 3, 1)\n",
    "print(x_input.shape)\n",
    "#view(x_input.size()[0], -1) 将张量重塑为形状为 (2, -1) 的新张量。-1 表示自动计算该维度的大小，使得总元素数量保持不变。\n",
    "x_input=x_input.view(x_input.size()[0],-1)\n",
    "print(\"\\n使用神经网络进行预测\")\n",
    "#调用神经网络进行输出\n",
    "y_pred = model_c.forward(x_input)\n",
    "print(y_pred)"
   ],
   "id": "4c5b0b438063efc1",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Sequential(\n",
      "  (0): Linear(in_features=9, out_features=5, bias=True)\n",
      "  (1): ReLU()\n",
      "  (2): Linear(in_features=5, out_features=3, bias=True)\n",
      "  (3): Softmax(dim=1)\n",
      ")\n",
      "\n",
      "定义神经网络样本输入\n",
      "torch.Size([2, 3, 3, 1])\n",
      "\n",
      "使用神经网络进行预测\n",
      "tensor([[0.1794, 0.1233, 0.6973],\n",
      "        [0.3679, 0.2034, 0.4287]], grad_fn=<SoftmaxBackward0>)\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-15T02:58:24.593035Z",
     "start_time": "2024-11-15T02:58:24.575121Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#自定义神经网络类\n",
    "#从Module继承\n",
    "print(\"自定义新的神经网络模型的类\")\n",
    "class NNet(torch.nn.Module):\n",
    "    # 定义神经网络，输入数量，隐藏层输入数量，输出数量\n",
    "    def __init__(self, n_feature, n_hidden, n_output):\n",
    "        super(NNet, self).__init__()\n",
    "        self.h1 = nn.Linear(n_feature, n_hidden) #线性层（全连接层）\n",
    "        self.relu1 = nn.ReLU()                   #ReLU激活函数层\n",
    "        self.out = nn.Linear(n_hidden, n_output) #线性层（全连接层）\n",
    "        self.softmax = nn.Softmax(dim=1)         #Softmax层\n",
    "        \n",
    "    #定义前向运算\n",
    "    def forward(self, x):\n",
    "        #按层顺序计算\n",
    "        x = x.view(x.size()[0],-1) # -1表示自动匹配\n",
    "        #线性层完成in->hidden1\n",
    "        h1 = self.h1(x)\n",
    "        #激活函数层进行特征信息提取\n",
    "        a1 =  self.relu1(h1)\n",
    "        #线性层完成hidden->out\n",
    "        out = self.out(a1)\n",
    "        #out层输出结果\n",
    "        a_out = self.softmax(out)\n",
    "        return out\n",
    " \n",
    "print(\"\\n实例化神经网络模型对象\")\n",
    "model = NNet(3*3, 5, 3)\n",
    "print(model)\n",
    " \n",
    "print(\"\\n显示网络模型参数\")\n",
    "print(model.parameters)\n",
    " \n",
    "print(\"\\n定义神经网络样本输入\")\n",
    "x_input = torch.randn(2, 3, 3, 1)\n",
    "print(x_input.shape)\n",
    " \n",
    "print(\"\\n使用神经网络进行预测\")\n",
    "y_pred = model.forward(x_input)\n",
    "print(y_pred)"
   ],
   "id": "66fb311960e15bd6",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "自定义新的神经网络模型的类\n",
      "\n",
      "实例化神经网络模型对象\n",
      "NNet(\n",
      "  (h1): Linear(in_features=9, out_features=5, bias=True)\n",
      "  (relu1): ReLU()\n",
      "  (out): Linear(in_features=5, out_features=3, bias=True)\n",
      "  (softmax): Softmax(dim=1)\n",
      ")\n",
      "\n",
      "显示网络模型参数\n",
      "<bound method Module.parameters of NNet(\n",
      "  (h1): Linear(in_features=9, out_features=5, bias=True)\n",
      "  (relu1): ReLU()\n",
      "  (out): Linear(in_features=5, out_features=3, bias=True)\n",
      "  (softmax): Softmax(dim=1)\n",
      ")>\n",
      "\n",
      "定义神经网络样本输入\n",
      "torch.Size([2, 3, 3, 1])\n",
      "\n",
      "使用神经网络进行预测\n",
      "tensor([[-0.4702,  0.5832, -0.5381],\n",
      "        [-0.0998, -0.2714,  0.2230]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "execution_count": 27
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "b8fe04abb17859b4"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
