{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a256d2d8a29856de",
   "metadata": {},
   "source": [
    "## 注意力机制"
   ]
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "ExecuteTime": {
     "end_time": "2025-06-18T03:39:02.342947Z",
     "start_time": "2025-06-18T03:39:02.336424Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "# i表示输入的一个seq，i1、i2、i3、i4表示seq中的4个token，每个token的词向量是3维向量\n",
    "i1 = torch.tensor([1, 0, 0]).float()\n",
    "i2 = torch.tensor([0, 1, 0]).float()\n",
    "i3 = torch.tensor([0, 0, 1]).float()\n",
    "i4 = torch.tensor([0, 0, 2]).float()\n",
    "\n",
    "# w是二维矩阵\n",
    "wq = torch.tensor([[1, 0, 0], [0, 1, 0]]).float()\n",
    "wk = torch.tensor([[0, 1, 0], [0, 1, 0]]).float()\n",
    "\n",
    "\n",
    "# 矩阵乘以向量，需要矩阵列数 = 向量长度，结果是一个向量，结果向量的每个元素是矩阵的行与向量的逐元素乘积之和，结果向量长度为矩阵的行数\n",
    "# 向量乘以矩阵，需要向量长度 = 矩阵的行数，把向量看成是一个一维矩阵(1, 3)，矩阵为(3, 3)，结果仍然是一个向量，结果向量长度为矩阵的列数\n",
    "q1 = torch.matmul(wq, i1) # 由于i1是一个向量，所以q1是一个向量\n",
    "\n",
    "# 同理，k1、k2、k3、k4都是向量\n",
    "k1 = torch.matmul(wk, i1)\n",
    "k2 = torch.matmul(wk, i2)\n",
    "k3 = torch.matmul(wk, i3)\n",
    "k4 = torch.matmul(wk, i4)\n",
    "\n",
    "# q和k都是向量，所以是两个向量的点积，也就是dot product，结果为一个标量，得到注意力分数\n",
    "attention_score_11 = torch.matmul(q1, k1)\n",
    "attention_score_12 = torch.matmul(q1, k2)\n",
    "attention_score_13 = torch.matmul(q1, k3)\n",
    "attention_score_14 = torch.matmul(q1, k4)\n",
    "\n",
    "# 经过softmax得到注意力权重\n",
    "# 哪个k的权重大，那就表示当前q更关注这个k\n",
    "# 最终模型训练后，就是要找到q最应该要关注的那个k\n",
    "attention_weight_q1 = torch.softmax(torch.tensor([attention_score_11, attention_score_12, attention_score_13, attention_score_14]), dim=0)\n",
    "\n",
    "attention_weight_q1"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([0.1749, 0.4754, 0.1749, 0.1749])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "markdown",
   "id": "6fe3d071391a372e",
   "metadata": {},
   "source": [
    "以目前的wq和wk，发现q1更关注k2，也就代表i1更关注i2。如果需要调整，那就调整wq和wk即可。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "a25fc85ac600088d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.520084Z",
     "start_time": "2025-05-24T13:23:04.515983Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.5246, 0.4754])"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "wv = torch.tensor([[0, 0, 1], [0, 1, 0]]).float() # wv is a (2, 3) matrix\n",
    "\n",
    "# v1..v4 are all vectors (length 2)\n",
    "v1 = torch.matmul(wv, i1)\n",
    "v2 = torch.matmul(wv, i2)\n",
    "v3 = torch.matmul(wv, i3)\n",
    "v4 = torch.matmul(wv, i4)\n",
    "\n",
    "# each attention_weight_q1[j] is a scalar; o1 is the weighted sum of the value vectors\n",
    "o1 = attention_weight_q1[0] * v1 + attention_weight_q1[1] * v2 + attention_weight_q1[2] * v3 + attention_weight_q1[3] * v4\n",
    "o1"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e9218074aaa7fafb",
   "metadata": {},
   "source": [
    "相当于，根据i1，最终得到了o1，o1肯定跟i1有关系，同时也关注了i2,i3,i4，所以o1中包含的信息是很多的"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "e757b75a0656bee9",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.535331Z",
     "start_time": "2025-05-24T13:23:04.532076Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 0., 0.],\n",
       "        [0., 1., 0.],\n",
       "        [0., 0., 1.],\n",
       "        [0., 0., 2.]])"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# stack i1, i2, i3, i4 into a (4, 3) matrix: one row per token\n",
    "i = torch.stack([i1, i2, i3, i4], dim=0)\n",
    "i"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "5e7ae3174d7b892",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.570944Z",
     "start_time": "2025-05-24T13:23:04.567589Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 0., 0., 0.],\n",
       "        [0., 1., 0., 0.]])"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "q = torch.matmul(wq, i.T) # (2, 3) @ (3, 4) -> (2, 4); the 4 columns are q1, q2, q3, q4\n",
    "q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "67a2297cc3d4426c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.626483Z",
     "start_time": "2025-05-24T13:23:04.623495Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., 1., 0., 0.],\n",
       "        [0., 1., 0., 0.]])"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "k = torch.matmul(wk, i.T) # (2, 3) @ (3, 4) -> (2, 4); the 4 columns are k1, k2, k3, k4\n",
    "k"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "e5d7c121f1884056",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.688077Z",
     "start_time": "2025-05-24T13:23:04.685068Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., 0.],\n",
       "        [1., 1.],\n",
       "        [0., 0.],\n",
       "        [0., 0.]])"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# after transposing, the 4 rows are k1, k2, k3, k4\n",
    "k.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "a6c8d97b0728886c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.772491Z",
     "start_time": "2025-05-24T13:23:04.767901Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., 0., 1., 2.],\n",
       "        [0., 1., 0., 0.]])"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "v = torch.matmul(wv, i.T) # (2, 3) @ (3, 4) -> (2, 4); the 4 columns are v1, v2, v3, v4\n",
    "v"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "b51633457565bd19",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.840340Z",
     "start_time": "2025-05-24T13:23:04.837069Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0., 0., 0., 0.],\n",
       "        [1., 1., 0., 0.],\n",
       "        [0., 0., 0., 0.],\n",
       "        [0., 0., 0., 0.]])"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "qk = torch.matmul(k.T, q) # (4, 2) @ (2, 4) -> (4, 4); entry (0,0) is k1.q1, (0,1) is k1.q2, (0,2) is k1.q3, ...\n",
    "qk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "226835415b03094",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:04.913475Z",
     "start_time": "2025-05-24T13:23:04.910631Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.1749, 0.1749, 0.2500, 0.2500],\n",
       "        [0.4754, 0.4754, 0.2500, 0.2500],\n",
       "        [0.1749, 0.1749, 0.2500, 0.2500],\n",
       "        [0.1749, 0.1749, 0.2500, 0.2500]])"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# dim=0 applies softmax down each column (dim=1 would be along each row)\n",
    "# column 0 holds the attention weights of q1 over k1..k4\n",
    "attention_weight = torch.softmax(qk, dim=0)\n",
    "attention_weight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "2734e4e2305904d0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.011577Z",
     "start_time": "2025-05-24T13:23:05.008304Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.5246, 0.5246, 0.7500, 0.7500],\n",
       "        [0.4754, 0.4754, 0.2500, 0.2500]])"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "o = torch.matmul(v, attention_weight) # (2, 4) @ (4, 4) -> (2, 4); column j is the output for query j+1\n",
    "o"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "c4928fa9453dc300",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.079928Z",
     "start_time": "2025-05-24T13:23:05.074696Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.5246, 0.5246, 0.7500, 0.7500],\n",
       "        [0.4754, 0.4754, 0.2500, 0.2500]])"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# full pipeline in one cell: i is the input seq of 4 tokens, each a 3-dim embedding vector\n",
    "i1 = torch.tensor([1, 0, 0]).float()\n",
    "i2 = torch.tensor([0, 1, 0]).float()\n",
    "i3 = torch.tensor([0, 0, 1]).float()\n",
    "i4 = torch.tensor([0, 0, 2]).float()\n",
    "\n",
    "i = torch.stack([i1, i2, i3, i4], dim=0)\n",
    "\n",
    "# each w is a 2-D projection matrix\n",
    "wq = torch.tensor([[1, 0, 0], [0, 1, 0]]).float()\n",
    "wk = torch.tensor([[0, 1, 0], [0, 1, 0]]).float()\n",
    "wv = torch.tensor([[0, 0, 1], [0, 1, 0]]).float()\n",
    "\n",
    "q = torch.matmul(wq, i.T)\n",
    "k = torch.matmul(wk, i.T)\n",
    "v = torch.matmul(wv, i.T)\n",
    "qk = torch.matmul(k.T, q)\n",
    "o = torch.matmul(v, torch.softmax(qk, dim=0))\n",
    "o"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "3fb6ef4008e773e7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.142989Z",
     "start_time": "2025-05-24T13:23:05.140057Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([2, 4]), torch.Size([2, 4]), torch.Size([2, 4]))"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# q, k, v all have shape (2, 4): one column per token\n",
    "q.shape, k.shape, v.shape,"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "f28db25d54456021",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.184007Z",
     "start_time": "2025-05-24T13:23:05.181432Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([4, 3]), torch.Size([2, 4]))"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# input i is (4, 3); output o is (2, 4) in this column layout\n",
    "i.shape, o.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "517e7dcc771341d6",
   "metadata": {},
   "source": [
    "i相当于输入，比如一个序列中的某个token，v就相当于i\n",
    "\n",
    "q是用来和其他token的k来计算注意力权重的\n",
    "\n",
    "最终的输出o，就相当于包含了i本身以及i和其他token之间的注意力信息\n",
    "\n",
    "所谓注意力，就可以理解为某个token和另外token之间的相关性，注意力权重越高就越相关\n",
    "\n",
    "不管序列有多长，总是能计算某个token和其他token之间的注意力的，并且是可以并行的\n",
    "\n",
    "整个过程，只有wq、wk、wv需要进行训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "ddad900375d608e7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.208908Z",
     "start_time": "2025-05-24T13:23:05.203787Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.8191, 0.6401, 0.4639, 0.9759],\n",
       "        [0.5611, 0.4388, 0.3170, 0.6634]])"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# i is the input seq; i1..i4 are its 4 tokens, each token a 3-dim embedding vector\n",
    "i1 = torch.rand(1, 3)\n",
    "i2 = torch.rand(1, 3)\n",
    "i3 = torch.rand(1, 3)\n",
    "i4 = torch.rand(1, 3)\n",
    "\n",
    "i = torch.cat([i1, i2, i3, i4], dim=0) # i has shape (4, 3): 4 tokens, each a length-3 vector\n",
    "\n",
    "# each w is a 2-D projection matrix\n",
    "wq = torch.rand(2, 3)\n",
    "wk = torch.rand(2, 3)\n",
    "wv = torch.rand(2, 3)\n",
    "\n",
    "q = torch.matmul(wq, i.T)  # wq is (2, 3), i.T is (3, 4), so q is (2, 4)\n",
    "k = torch.matmul(wk, i.T)  # wk is (2, 3), i.T is (3, 4), so k is (2, 4)\n",
    "v = torch.matmul(wv, i.T)  # wv is (2, 3), i.T is (3, 4), so v is (2, 4)\n",
    "qk = torch.matmul(q, k.T)  # q is (2, 4), k.T is (4, 2), so qk is (2, 2) -- NOTE(review): this pairs feature dims, not token-token scores; the layout is corrected in the final cell\n",
    "o = torch.matmul(torch.softmax(qk, dim=0), v)  # qk is (2, 2), v is (2, 4), o is (2, 4): rows from qk, columns from v\n",
    "o"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "c8cd681250e121ee",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.254121Z",
     "start_time": "2025-05-24T13:23:05.246379Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "输入张量x:\n",
      "tensor([[1., 0., 0.],\n",
      "        [0., 1., 0.],\n",
      "        [0., 0., 1.],\n",
      "        [0., 0., 2.]])\n",
      "形状: torch.Size([4, 3])\n",
      "\n",
      "\n",
      "新Q矩阵:\n",
      "tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 0.],\n",
      "        [0., 0.]], grad_fn=<MmBackward0>)\n",
      "\n",
      "新K矩阵:\n",
      "tensor([[0., 0.],\n",
      "        [1., 1.],\n",
      "        [0., 0.],\n",
      "        [0., 0.]], grad_fn=<MmBackward0>)\n",
      "\n",
      "新V矩阵:\n",
      "tensor([[0., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [2., 0.]], grad_fn=<MmBackward0>)\n",
      "\n",
      "新注意力权重矩阵:\n",
      "tensor([[0.1749, 0.4754, 0.1749, 0.1749],\n",
      "        [0.1749, 0.4754, 0.1749, 0.1749],\n",
      "        [0.2500, 0.2500, 0.2500, 0.2500],\n",
      "        [0.2500, 0.2500, 0.2500, 0.2500]], grad_fn=<SoftmaxBackward0>)\n",
      "\n",
      "新输出张量:\n",
      "tensor([[0.5246, 0.4754],\n",
      "        [0.5246, 0.4754],\n",
      "        [0.7500, 0.2500],\n",
      "        [0.7500, 0.2500]], grad_fn=<MmBackward0>)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "# input sequence: 4 tokens, each with a 3-dim embedding vector\n",
    "x = torch.tensor([[1, 0, 0],\n",
    "                  [0, 1, 0],\n",
    "                  [0, 0, 1],\n",
    "                  [0, 0, 2]], dtype=torch.float32)\n",
    "\n",
    "print(\"输入张量x:\")\n",
    "print(x)\n",
    "print(\"形状:\", x.shape)\n",
    "print(\"\\n\")\n",
    "\n",
    "# Linear(3, 2) stores a (2, 3) weight and applies it as x @ W.T\n",
    "q_layer = nn.Linear(3, 2, bias=False)\n",
    "k_layer = nn.Linear(3, 2, bias=False)\n",
    "v_layer = nn.Linear(3, 2, bias=False)\n",
    "\n",
    "# manually initialize the weights as (2, 3) matrices\n",
    "with torch.no_grad():\n",
    "    q_layer.weight.data = torch.tensor([[1,0,0],\n",
    "                                        [0,1,0]], dtype=torch.float32)\n",
    "    k_layer.weight.data = torch.tensor([[0,1,0],\n",
    "                                        [0,1,0]], dtype=torch.float32)\n",
    "    v_layer.weight.data = torch.tensor([[0,0,1],\n",
    "                                        [0,1,0]], dtype=torch.float32)\n",
    "\n",
    "# produce Q/K/V: one row per token\n",
    "Q = q_layer(x)  # shape: (4,2)\n",
    "K = k_layer(x)  # shape: (4,2)\n",
    "V = v_layer(x)  # shape: (4,2)\n",
    "\n",
    "print(\"新Q矩阵:\")\n",
    "print(Q)\n",
    "print(\"\\n新K矩阵:\")\n",
    "print(K)\n",
    "print(\"\\n新V矩阵:\")\n",
    "print(V)\n",
    "\n",
    "# attention scores; NOTE(review): d_k is computed but unused here --\n",
    "# standard scaled dot-product attention would divide the scores by sqrt(d_k)\n",
    "d_k = Q.size(-1)  # 2\n",
    "attn_scores = torch.matmul(Q, K.T)\n",
    "attn_weights = torch.softmax(attn_scores, dim=-1)  # rows are queries, so normalize along the last dim\n",
    "\n",
    "print(\"\\n新注意力权重矩阵:\")\n",
    "print(attn_weights)\n",
    "\n",
    "# final output: weighted sum of value rows, one output row per token\n",
    "output = torch.matmul(attn_weights, V)\n",
    "print(\"\\n新输出张量:\")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cedfe863b64ffcbf",
   "metadata": {},
   "source": [
    "输入是4个token，输出也是4个token\n",
    "\n",
    "我上面给的两个例子中，i和w相乘时，w的维度其实要转置一下，这样最终输入和输出的维度才能对上"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "6c950b653bd20aea",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T13:23:05.292124Z",
     "start_time": "2025-05-24T13:23:05.287426Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.6334, 0.8686],\n",
       "        [0.7806, 1.0696],\n",
       "        [0.9217, 1.2621],\n",
       "        [0.6073, 0.8314]])"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# i is the input seq; i1..i4 are its 4 tokens, each token a 3-dim embedding vector\n",
    "i1 = torch.rand(1, 3)\n",
    "i2 = torch.rand(1, 3)\n",
    "i3 = torch.rand(1, 3)\n",
    "i4 = torch.rand(1, 3)\n",
    "\n",
    "# concatenate i1..i4 into one matrix\n",
    "i = torch.cat([i1, i2, i3, i4], dim=0) # i has shape (4, 3): 4 tokens, each a length-3 vector\n",
    "\n",
    "# each w is a 2-D projection matrix\n",
    "wq = torch.rand(3, 2)\n",
    "wk = torch.rand(3, 2)\n",
    "wv = torch.rand(3, 2)\n",
    "\n",
    "q = torch.matmul(i, wq)  # i is (4, 3), wq is (3, 2), so q is (4, 2)\n",
    "k = torch.matmul(i, wk)  # i is (4, 3), wk is (3, 2), so k is (4, 2)\n",
    "v = torch.matmul(i, wv)  # i is (4, 3), wv is (3, 2), so v is (4, 2)\n",
    "qk = torch.matmul(q, k.T)  # q is (4, 2), k.T is (2, 4), so qk is (4, 4); entry (i, j) = q_i . k_j\n",
    "# rows of qk are queries, so the softmax must normalize over the keys: dim=1\n",
    "# (dim=0 would normalize across queries; this matches dim=-1 in the nn.Linear cell above)\n",
    "o = torch.matmul(torch.softmax(qk, dim=1), v)  # qk is (4, 4), v is (4, 2), o is (4, 2): one output row per token, matching i's row count\n",
    "o"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
