{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "12e0cb86-74e0-46bf-a27a-5038b4e7f242",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision.models as models\n",
    "import torch.nn as nn\n",
    "torch.set_printoptions(sci_mode=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a12dc686-058d-4b93-8f0d-6e7cfe721721",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Save only the model weights (state_dict) — the recommended way to persist a model\n",
     "model = models.vgg16(weights='IMAGENET1K_V1')\n",
     "torch.save(model.state_dict(), 'model_weights.pth')\n",
     "\n",
     "# Load model weights: build an untrained model with the same architecture first\n",
     "model = models.vgg16() # we do not specify ``weights``, i.e. create untrained model\n",
     "model.load_state_dict(torch.load('model_weights.pth', weights_only=True))\n",
     "model.eval()\n",
     "\n",
     "# Save the whole model object (pickles the entire module, not just the weights)\n",
     "torch.save(model, 'model.pth')\n",
     "\n",
     "# Load the whole model (weights_only=False is required because this is a pickled object)\n",
     "model = torch.load('model.pth', weights_only=False)\n",
     "\n",
     "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3722461-5a8d-4913-bdcd-e0a099eb9c40",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "X = torch.tensor([[1,2,3], [4,5,6], [7,8,9]], dtype=torch.float32).unsqueeze(0).unsqueeze(0)  # (3, 3) -> (1, 1, 3, 3)\n",
     "K = torch.tensor([[1,0], [0,1]], dtype=torch.float32).unsqueeze(0).unsqueeze(0) # (2, 2) -> (1, 1, 2, 2)\n",
     "\n",
     "# Single-channel convolution example\n",
     "conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=0, bias=False)  # in/out channels, kernel size, stride 1, padding=0 means no padding, no learned bias\n",
     "conv.weight = nn.Parameter(K)  # set the convolution kernel explicitly\n",
     "y = conv(X)\n",
     "\n",
     "print(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32c8b06d-f173-4177-94d2-7ccd56a53d21",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "X = torch.tensor([[[2,2,2], [4,5,6], [7,8,9]],\n",
     "                 [[1,2,3], [3,3,3], [7,8,9]],\n",
     "                 [[1,2,3], [4,5,6], [4,4,4]]],\n",
     "                 dtype=torch.float32).unsqueeze(0)  # (3, 3, 3) -> (1, 3, 3, 3): (batch N, input channels C, height H, width W)\n",
     "\n",
     "conv = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=2, stride=1, padding=0, bias=False)\n",
     "y = conv(X)\n",
     "print(y)\n",
     "\n",
     "\n",
     "# K = torch.tensor([[ [[1,0], [0,1]],  [[1,0], [1,0]],  [[0,1], [0,1]] ]], dtype=torch.float32)  # (1,3,2,2)\n",
     "K = torch.tensor([[[[1,0], [0,1]],  [[1,0], [1,0]],  [[0,1], [0,1]]], \n",
     "                  [[[1,0], [0,1]],  [[0,1], [0,1]],  [[1,0], [1,0]]]],\n",
     "                 dtype=torch.float32) # (2, 3, 2, 2): (output channels O, input channels C, kernel H, kernel W); a kernel is usable only if its input-channel count C matches the input tensor's\n",
     "\n",
     "# Multi-channel convolution example\n",
     "conv = nn.Conv2d(in_channels=3, out_channels=2, kernel_size=2, stride=1, padding=0, bias=False)  # in/out channels, kernel size, stride 1, padding=0 means no padding, no learned bias\n",
     "conv.weight = nn.Parameter(K)  # once the kernel is assigned explicitly, Conv2d's in_channels/out_channels no longer matter — they are only used to initialize `weight` when no kernel is given\n",
     "y = conv(X)\n",
     "\n",
     "print(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db39bc78-e8f5-49d9-bfa4-327362e83af6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn.functional as Fun"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45cef37b-a351-4533-be53-0063a6820c6a",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# 2D max pooling works like a convolution, but the aggregation is fixed by the op: max or avg\n",
     "# Forward pass through the pooling op\n",
     "print(Fun.max_pool2d(y, (1, 2)))  # pooling window kernel_size=(1, 2); stride defaults to kernel_size, i.e. (1, 2) — NOT kernel 1 / stride 2\n",
     "\n",
     "mp = nn.MaxPool2d(1, 1)(y)  # kernel_size=1, stride=1: max over a single element, so output equals input\n",
     "print(mp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1dceef4-34e7-45b1-9b75-0d8ebe70df41",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.functional as F\n",
    "\n",
    "\n",
    "# Convolutional Layers 卷积神经网络示例\n",
    "class LeNet(torch.nn.Module):\n",
    "\n",
    "    def __init__(self):\n",
    "        super(LeNet, self).__init__()\n",
    "        # 1 input image channel (black & white), 6 output channels, 5x5 square convolution\n",
    "        # kernel\n",
    "        self.conv1 = torch.nn.Conv2d(1, 6, 5)  # 卷积层\n",
    "        self.conv2 = torch.nn.Conv2d(6, 16, 3)\n",
    "        # an affine operation: y = Wx + b\n",
    "        self.fc1 = torch.nn.Linear(16 * 6 * 6, 120)  # 6*6 from image dimension\n",
    "        self.fc2 = torch.nn.Linear(120, 84)\n",
    "        self.fc3 = torch.nn.Linear(84, 10)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Max pooling over a (2, 2) window\n",
    "        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # 池化层\n",
    "        # If the size is a square you can only specify a single number\n",
    "        x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n",
    "        x = x.view(-1, self.num_flat_features(x))  # reshape\n",
    "        x = F.relu(self.fc1(x))\n",
    "        x = F.relu(self.fc2(x))\n",
    "        x = self.fc3(x)\n",
    "        return x\n",
    "\n",
    "    def num_flat_features(self, x):\n",
    "        size = x.size()[1:]  # all dimensions except the batch dimension\n",
    "        num_features = 1\n",
    "        for s in size:\n",
    "            num_features *= s\n",
    "        return num_features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "479cacf5-4c3b-45b2-8e7e-a9c744dbde4b",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# @ is matrix multiplication; t() is transpose\n",
     "print(torch.tensor([[3,1], [2,1]]).t())\n",
     "torch.tensor([2,4]) @ torch.tensor([[3,1], [2,1]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee844866-f6e4-4d34-a1d7-253c54bf214d",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# an Embedding module containing 10 tensors of size 3\n",
     "embedding = nn.Embedding(10, 3)  # weight.shape (10, 3); with max_norm=1.0 the looked-up vectors would be renormalized during the forward pass to that max norm (default norm type is L2: sqrt of sum of squares)\n",
     "# a batch of 2 samples of 4 indices each\n",
     "input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])\n",
     "print(embedding.weight)\n",
     "print(embedding(input))\n",
     "print(embedding.weight)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "90d99e90-4004-46a7-b8be-f117d018830a",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# example with padding_idx=5: the dictionary vector at index padding_idx is all zeros and is not updated during training\n",
     "embedding = nn.Embedding(10, 3, padding_idx=5)\n",
     "input = torch.LongTensor([[0, 2, 0, 5]])\n",
     "print(embedding(input))\n",
     "print(embedding.weight)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py312",
   "language": "python",
   "name": "py312"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
