{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-02-08T00:56:02.530059Z",
     "start_time": "2025-02-08T00:56:01.257471Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "print(torch.__version__)\n",
    "\n",
    "import numpy as np"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.3.1+cu121\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 1.Tensor的理解：\n",
    "    1.常数，scalar:0阶张量\n",
    "    2.向量,vector,1阶张量\n",
    "    3.矩阵,matrix:2阶张量\n",
    "    4.3阶张量"
   ],
   "id": "75b4604198d64048"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 2.Tensor的创建：\n",
    "     1.使用列表创建Tensor\n",
    "     2.使用numpy数组创建Tensor\n",
    "     3.通过torch的API创建Tensor"
   ],
   "id": "1385d490fe701847"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:09:14.860936Z",
     "start_time": "2025-02-08T01:09:14.854936Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 使用列表创建Tensor\n",
    "\n",
    "# 1阶张量\n",
    "# torch.Tensor(data):\n",
    "t1 = torch.Tensor([1, 2, 3])\n",
    "print(t1)\n",
    "\n",
    "# 使用numpy数组创建Tensor\n",
    "array1 = np.arange(12).reshape(3, 4)\n",
    "t2 = torch.Tensor(array1)\n",
    "print(t2)"
   ],
   "id": "a76d41da6f8db1db",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([1., 2., 3.])\n",
      "tensor([[ 0.,  1.,  2.,  3.],\n",
      "        [ 4.,  5.,  6.,  7.],\n",
      "        [ 8.,  9., 10., 11.]])\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:10:31.989555Z",
     "start_time": "2025-02-08T01:10:31.982268Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 通过torch的API创建Tensor\n",
    "\"\"\"\n",
    "torch.empty(3,4)：创建3行四列的空的tensor,会用无用的数据进行填充\n",
    "torch.ones([3,4]):三行四列全为1的tensor\n",
    "torch.zeros([3,4]):三行四列全为0的tensor\n",
    "torch.rand([3,4]):三行四列随机值在[0,1]之间的值\n",
    "torch.randint(low = 0, high = 10, size = [3, 4]) 创建3*4的随机整数的Tensor，值区间：[low, high)，不包含high\n",
    "torch.randn([3,4]) 均值为0，方差为1,3*4的tensor\n",
    "\"\"\"\n",
    "print(torch.empty(3, 4))\n",
    "print(torch.ones([3, 4]))\n",
    "print(torch.zeros([3, 4]))\n",
    "print(torch.rand([3, 4]))"
   ],
   "id": "48a72c7d1d41298a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.]])\n",
      "tensor([[1., 1., 1., 1.],\n",
      "        [1., 1., 1., 1.],\n",
      "        [1., 1., 1., 1.]])\n",
      "tensor([[0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.]])\n",
      "tensor([[0.3697, 0.0557, 0.5837, 0.2925],\n",
      "        [0.3662, 0.0208, 0.1766, 0.0881],\n",
      "        [0.8281, 0.3833, 0.1269, 0.5149]])\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 3. 张量的方法和属性\n",
    "    1.tensor.item(),当tensor中只有一个元素可以用的时候\n",
    "    2.Tensor转为ndarray\n",
    "    3.形状修改，tensor.view((3, 4)), 类似numpy中的reshape,是一种浅拷贝\n",
    "    4.获取维数、转置、轴滚动。\n",
    "    5.在方法后加_，会原地修改，即in-place（就地）操作，不创建新的tensor"
   ],
   "id": "a5e83ae0814bb35e"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:16:36.754009Z",
     "start_time": "2025-02-08T01:16:36.750003Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 创建一个包含单个元素的PyTorch张量\n",
    "# 通过np.array(1)生成一个NumPy标量数组（shape为()）\n",
    "# torch.tensor() 将NumPy数组转换为PyTorch张量\n",
    "# 此时 a 是一个零维（标量）张量，包含值1\n",
    "a = torch.tensor(np.array(1))\n",
    "\n",
    "# 使用.item()方法提取张量中的标量值\n",
    "# .item() 会将单元素张量转换为Python原生数据类型（此处是int）\n",
    "# 注意：当且仅当张量包含单个元素时才能使用.item()\n",
    "print(a.item())  # 输出：1（Python原生整数）\n",
    "\n",
    "# 直接打印张量对象\n",
    "# 这会显示张量的完整信息，包括值、数据类型和设备位置（CPU/GPU）\n",
    "print(a)  # 输出：tensor(1)（PyTorch张量对象）\n",
    "\n",
    "# 只要张量仅包含一个元素，即使是多维张量也可以用.item()取出该值\n",
    "print(torch.Tensor([[[1]]]).item())"
   ],
   "id": "47c2545290d53d9b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "tensor(1, dtype=torch.int32)\n",
      "1.0\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:20:55.446250Z",
     "start_time": "2025-02-08T01:20:55.442288Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 张量转为ndarray\n",
    "\n",
    "# 创建一个PyTorch张量（注意：torch.Tensor是类构造函数，torch.tensor是工厂函数）\n",
    "# 输入二维列表 [[3, 4]]，生成形状为 (1, 2) 的浮点型张量（默认float32）\n",
    "t2 = torch.Tensor([[3, 4]])  # 等效于 torch.tensor([[3, 4]], dtype=torch.float32)\n",
    "\n",
    "# 将PyTorch张量转换为NumPy数组（共享内存）\n",
    "array2 = t2.numpy()  # 当张量在CPU时，与NumPy数组共享底层数据存储\n",
    "print(array2)  # 输出：[[3. 4.]]（二维数组，注意自动转换为float32）\n",
    "\n",
    "# 打印张量的形状属性（两种等效方式）\n",
    "print(t2.shape)  # 输出：torch.Size([1, 2])\n",
    "print(t2.size())  # 输出：torch.Size([1, 2])，与shape完全相同\n",
    "\n",
    "# 获取特定维度的大小\n",
    "print(t2.size(0))  # 输出：1（第0维的大小，即行数）"
   ],
   "id": "2fd31c249eec4f97",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[3. 4.]]\n",
      "torch.Size([1, 2])\n",
      "torch.Size([1, 2])\n",
      "1\n"
     ]
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:21:41.549416Z",
     "start_time": "2025-02-08T01:21:41.546544Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 写一个ndarray\n",
    "array1 = np.array([[1, 2, 3], [4, 5, 6]])\n",
    "print(id(array1))\n",
    "array2 = array1.reshape(3, 2)\n",
    "print(id(array2))"
   ],
   "id": "abb4d52e9a85fa27",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1928605724144\n",
      "1928605802704\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:21:52.525048Z",
     "start_time": "2025-02-08T01:21:52.521699Z"
    }
   },
   "cell_type": "code",
   "source": [
    "array2[0, 0] = 100\n",
    "print(array1)\n",
    "print(array2)"
   ],
   "id": "6ed96e83bde1210d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[100   2   3]\n",
      " [  4   5   6]]\n",
      "[[100   2]\n",
      " [  3   4]\n",
      " [  5   6]]\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:21:56.247888Z",
     "start_time": "2025-02-08T01:21:56.242950Z"
    }
   },
   "cell_type": "code",
   "source": "array1.ndim  #获取维数",
   "id": "93b452b824bee058",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:22:08.449502Z",
     "start_time": "2025-02-08T01:22:08.445323Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 3.3.形状修改，tensor.view((3, 4)), 类似numpy中的reshape,是一种浅拷贝，仅仅形状发生改变,返回一个新的结果\n",
    "t2 = torch.Tensor([[[3, 4]]])\n",
    "print(t2.size())\n",
    "print(t2.view([1, 2]))  #[1,2]表示1行2列\n",
    "print(t2.view([2]))  # 一维tensor"
   ],
   "id": "cb360e52fe8fd09e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 1, 2])\n",
      "tensor([[3., 4.]])\n",
      "tensor([3., 4.])\n"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:22:43.199760Z",
     "start_time": "2025-02-08T01:22:43.194361Z"
    }
   },
   "cell_type": "code",
   "source": [
    "b = t2.view([2, -1])  # -1表示自动计算\n",
    "print(b)\n",
    "print('-' * 50)\n",
    "print(t2)  #t2的形状并没有发生改变\n",
    "# https://pytorch.org/docs/stable/tensor_view.html\n",
    "t2.untyped_storage().untyped().data_ptr() == b.untyped_storage().untyped().data_ptr()  #判断两个tensor是否共享内存"
   ],
   "id": "dda4d04fb6f3c403",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[3.],\n",
      "        [4.]])\n",
      "--------------------------------------------------\n",
      "tensor([[[3., 4.]]])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:23:09.901028Z",
     "start_time": "2025-02-08T01:23:09.895864Z"
    }
   },
   "cell_type": "code",
   "source": [
    "b[0, 0] = 100\n",
    "print(b)\n",
    "print(t2)  #t2对应位置的值也被修改了，说明view与原tensor共享底层内存"
   ],
   "id": "4c27ec3ad310e266",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[100.],\n",
      "        [  4.]])\n",
      "tensor([[[100.,   4.]]])\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:23:15.505272Z",
     "start_time": "2025-02-08T01:23:15.498993Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#3. 获取维数\n",
    "print(t2.dim())\n",
    "\n",
    "#4.获取最大值\n",
    "print(t2.max())\n",
    "\n",
    "#5.转置\n",
    "t3 = torch.tensor([[1, 3, 4], [2, 4, 6]])\n",
    "print(t3)\n",
    "print(t3.t())  #转置\n"
   ],
   "id": "e81c6e963d6fc2c2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3\n",
      "tensor(100.)\n",
      "tensor([[1, 3, 4],\n",
      "        [2, 4, 6]])\n",
      "tensor([[1, 2],\n",
      "        [3, 4],\n",
      "        [4, 6]])\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:23:44.746949Z",
     "start_time": "2025-02-08T01:23:44.741947Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 交换轴,这里的permute和rollaxis功能类似\n",
    "t4 = torch.tensor(np.arange(24).reshape(2, 3, 4))\n",
    "print(t4.shape)\n",
    "print(\"-\" * 50)\n",
    "print(t4.transpose(0, 1).shape)  #交换0轴和1轴\n",
    "print(\"-\" * 50)\n",
    "print(t4.permute(1, 0, 2).shape)  #交换0轴和1轴,功能同上\n",
    "print(\"-\" * 50)\n",
    "print(t4.permute(1, 2, 0).shape)  #变为了3*4*2\n",
    "print(\"-\" * 50)\n",
    "print(t4.permute(2, 1, 0).shape)  #变为了4*3*2"
   ],
   "id": "7a2b4841f31039d9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 3, 4])\n",
      "--------------------------------------------------\n",
      "torch.Size([3, 2, 4])\n",
      "--------------------------------------------------\n",
      "torch.Size([3, 2, 4])\n",
      "--------------------------------------------------\n",
      "torch.Size([3, 4, 2])\n",
      "--------------------------------------------------\n",
      "torch.Size([4, 3, 2])\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:24:01.746237Z",
     "start_time": "2025-02-08T01:24:01.738237Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 在方法后加_，会原地修改\n",
    "x = torch.tensor(np.arange(12).reshape(3, 4), dtype=torch.int8)\n",
    "print(x)\n",
    "y = torch.ones([3, 4], dtype=torch.int64)\n",
    "print(y)\n",
    "\n",
    "print('-' * 50)\n",
    "x.sub_(y)  # sub_就地修改，不加下划线的sub会创建一个新的tensor来存储结果\n",
    "print(x)"
   ],
   "id": "399308792c54d3ba",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0,  1,  2,  3],\n",
      "        [ 4,  5,  6,  7],\n",
      "        [ 8,  9, 10, 11]], dtype=torch.int8)\n",
      "tensor([[1, 1, 1, 1],\n",
      "        [1, 1, 1, 1],\n",
      "        [1, 1, 1, 1]])\n",
      "--------------------------------------------------\n",
      "tensor([[-1,  0,  1,  2],\n",
      "        [ 3,  4,  5,  6],\n",
      "        [ 7,  8,  9, 10]], dtype=torch.int8)\n"
     ]
    }
   ],
   "execution_count": 24
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:24:06.481209Z",
     "start_time": "2025-02-08T01:24:06.472812Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#tensor取值,和np完全相同\n",
    "t5 = torch.tensor(np.arange(12).reshape(3, 4))\n",
    "print(t5)\n",
    "print(t5[1, 2])  #取值\n",
    "print(t5[1])  #取一行\n",
    "print(t5[:, 1])  #取一列\n",
    "print(t5[1:3, 1:3])  #取一部分\n",
    "print(t5[1:3, :])  #取一部分"
   ],
   "id": "f2fc1ec0da0cc5c9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0,  1,  2,  3],\n",
      "        [ 4,  5,  6,  7],\n",
      "        [ 8,  9, 10, 11]], dtype=torch.int32)\n",
      "tensor(6, dtype=torch.int32)\n",
      "tensor([4, 5, 6, 7], dtype=torch.int32)\n",
      "tensor([1, 5, 9], dtype=torch.int32)\n",
      "tensor([[ 5,  6],\n",
      "        [ 9, 10]], dtype=torch.int32)\n",
      "tensor([[ 4,  5,  6,  7],\n",
      "        [ 8,  9, 10, 11]], dtype=torch.int32)\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:24:11.711061Z",
     "start_time": "2025-02-08T01:24:11.702702Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#两个16行，1列的张量相减，求均值\n",
    "t6 = torch.tensor(np.arange(16).reshape(16, 1), dtype=torch.float32)\n",
    "t7 = torch.tensor(np.arange(16, 32).reshape(16, 1), dtype=torch.float32)\n",
    "print(t6)\n",
    "print(t7)\n",
    "((t6 - t7) ** 2).mean()"
   ],
   "id": "86ff7c9c753d4a0f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.],\n",
      "        [ 1.],\n",
      "        [ 2.],\n",
      "        [ 3.],\n",
      "        [ 4.],\n",
      "        [ 5.],\n",
      "        [ 6.],\n",
      "        [ 7.],\n",
      "        [ 8.],\n",
      "        [ 9.],\n",
      "        [10.],\n",
      "        [11.],\n",
      "        [12.],\n",
      "        [13.],\n",
      "        [14.],\n",
      "        [15.]])\n",
      "tensor([[16.],\n",
      "        [17.],\n",
      "        [18.],\n",
      "        [19.],\n",
      "        [20.],\n",
      "        [21.],\n",
      "        [22.],\n",
      "        [23.],\n",
      "        [24.],\n",
      "        [25.],\n",
      "        [26.],\n",
      "        [27.],\n",
      "        [28.],\n",
      "        [29.],\n",
      "        [30.],\n",
      "        [31.]])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor(256.)"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 26
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:24:34.505804Z",
     "start_time": "2025-02-08T01:24:34.499793Z"
    }
   },
   "cell_type": "code",
   "source": [
    "#初始化两个张量，一个3,4，一个3,1,运算与ndarray相同\n",
    "t8 = torch.tensor(np.arange(12).reshape(3, 4), dtype=torch.float32)\n",
    "t9 = torch.tensor(np.arange(3).reshape(3, 1), dtype=torch.float32)\n",
    "print(t8)\n",
    "print(t9)\n",
    "t8 - t9"
   ],
   "id": "9e42bae4139365da",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[ 0.,  1.,  2.,  3.],\n",
      "        [ 4.,  5.,  6.,  7.],\n",
      "        [ 8.,  9., 10., 11.]])\n",
      "tensor([[0.],\n",
      "        [1.],\n",
      "        [2.]])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[0., 1., 2., 3.],\n",
       "        [3., 4., 5., 6.],\n",
       "        [6., 7., 8., 9.]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 27
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:24:49.828077Z",
     "start_time": "2025-02-08T01:24:49.713175Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\"\"\"\n",
    "GPU中tensor的使用：\n",
    "1.实例化device， torch.device(\"cpu\" or \"cuda:0\"),选cuda:0需要torch.cuda.is_available()==true\n",
    "2.tensor.to(device),把tensor转化为CUDA支持的tensor或者CPU支持的tensor\n",
    "\"\"\"\n",
    "print(torch.cuda.is_available())\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device(\"cuda\")  #cuda device对象\n",
    "    y = torch.ones_like(x, device=device)  #创建一个在cuda上的tensor\n",
    "    x = x.to(device)  #把x转为cuda上的tensor\n",
    "    z = x + y\n",
    "    print(z)\n",
    "    print(z.to(\"cpu\", torch.double))\n",
    "\n",
    "# torch.tensor([1.9806], device = \"cuda:0\")\n",
    "device = torch.device(\"cpu\")\n",
    "x.to(device)"
   ],
   "id": "f906de63b4b5d2da",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n",
      "tensor([[ 0,  1,  2,  3],\n",
      "        [ 4,  5,  6,  7],\n",
      "        [ 8,  9, 10, 11]], device='cuda:0', dtype=torch.int8)\n",
      "tensor([[ 0.,  1.,  2.,  3.],\n",
      "        [ 4.,  5.,  6.,  7.],\n",
      "        [ 8.,  9., 10., 11.]], dtype=torch.float64)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-1,  0,  1,  2],\n",
       "        [ 3,  4,  5,  6],\n",
       "        [ 7,  8,  9, 10]], dtype=torch.int8)"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:25:32.947389Z",
     "start_time": "2025-02-08T01:25:32.942077Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 张量的运算\n",
    "t8 = torch.tensor(np.arange(12).reshape(3, 4), dtype=torch.float32)\n",
    "t9 = torch.tensor(np.arange(12).reshape(3, 4), dtype=torch.float32)\n",
    "t8 * t9"
   ],
   "id": "220c627e583a9c82",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[  0.,   1.,   4.,   9.],\n",
       "        [ 16.,  25.,  36.,  49.],\n",
       "        [ 64.,  81., 100., 121.]])"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-08T01:25:38.325494Z",
     "start_time": "2025-02-08T01:25:38.299495Z"
    }
   },
   "cell_type": "code",
   "source": "torch.mm(t8, t9.transpose(0, 1))  #矩阵乘法",
   "id": "58ab9d045cf2a544",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 14.,  38.,  62.],\n",
       "        [ 38., 126., 214.],\n",
       "        [ 62., 214., 366.]])"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "fa1128a18d39c975"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
