{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a256d2d8a29856de",
   "metadata": {},
   "source": "## Self-attention自注意力机制"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## Attention Is All You Need\n",
    "https://arxiv.org/html/1706.03762v7\n",
    "\n",
    "2017年的论文"
   ],
   "id": "8798a87948e8a290"
  },
  {
   "cell_type": "code",
   "id": "dd8d7671a6993318",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:51:29.785473Z",
     "start_time": "2025-07-08T07:51:29.779032Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "# Fix the RNG seed so the random weight matrices (and every downstream\n",
    "# output) are identical on each Restart & Run All.\n",
    "torch.manual_seed(42)\n",
    "\n",
    "# x1..x4 are the 4 tokens of one input sequence; each token is a\n",
    "# 3-dimensional embedding vector.\n",
    "x1 = torch.tensor([1.1, 1.2, 1.3]).float()\n",
    "x2 = torch.tensor([2.1, 2.2, 2.3]).float()\n",
    "x3 = torch.tensor([3.1, 3.2, 3.3]).float()\n",
    "x4 = torch.tensor([4.1, 4.2, 4.3]).float()\n",
    "\n",
    "# wq, wk, wv are the (random, untrained) weight matrices that produce\n",
    "# the queries, keys and values.\n",
    "wq = torch.rand(3, 3)\n",
    "wk = torch.rand(3, 3)\n",
    "wv = torch.rand(3, 3)"
   ],
   "outputs": [],
   "execution_count": 66
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "每一个token经过权重计算后都会得到对应的q、k、v",
   "id": "7b55bfd74bf82af9"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:51:31.572043Z",
     "start_time": "2025-07-08T07:51:31.567436Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Every token is projected three ways: to a query (q), a key (k) and a\n",
    "# value (v). Written out token-by-token for clarity; the matrix form\n",
    "# later in the notebook does all tokens in a single matmul.\n",
    "q1 = torch.matmul(x1, wq)\n",
    "q2 = torch.matmul(x2, wq)\n",
    "q3 = torch.matmul(x3, wq)\n",
    "q4 = torch.matmul(x4, wq)\n",
    "\n",
    "k1 = torch.matmul(x1, wk)\n",
    "k2 = torch.matmul(x2, wk)\n",
    "k3 = torch.matmul(x3, wk)\n",
    "k4 = torch.matmul(x4, wk)\n",
    "\n",
    "v1 = torch.matmul(x1, wv)\n",
    "v2 = torch.matmul(x2, wv)\n",
    "v3 = torch.matmul(x3, wv)\n",
    "v4 = torch.matmul(x4, wv)"
   ],
   "id": "e930f8e538ef57ac",
   "outputs": [],
   "execution_count": 67
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:51:36.512470Z",
     "start_time": "2025-07-08T07:51:36.508261Z"
    }
   },
   "cell_type": "code",
   "source": "q1, k1, v1",
   "id": "d0d599806ad7e969",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([1.4568, 2.2420, 1.6187]),\n",
       " tensor([0.8648, 1.8542, 1.7760]),\n",
       " tensor([2.1730, 1.7953, 1.6097]))"
      ]
     },
     "execution_count": 68,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 68
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "先计算q1和k1、k2、k3、k4的注意力分数",
   "id": "77dfe9f34c622394"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:54:05.395269Z",
     "start_time": "2025-07-08T07:54:05.392649Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Raw (unscaled) attention scores: dot product of q1 with every key\n",
    "attention_score_11 = torch.matmul(q1, k1)\n",
    "attention_score_12 = torch.matmul(q1, k2)\n",
    "attention_score_13 = torch.matmul(q1, k3)\n",
    "attention_score_14 = torch.matmul(q1, k4)"
   ],
   "id": "14a973d606efc903",
   "outputs": [],
   "execution_count": 74
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:54:06.380805Z",
     "start_time": "2025-07-08T07:54:06.376990Z"
    }
   },
   "cell_type": "code",
   "source": "attention_score_11, attention_score_12, attention_score_13, attention_score_14",
   "id": "237557cc67bd4f5b",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(8.2921), tensor(15.1280), tensor(21.9640), tensor(28.8000))"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 75
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "进行缩放",
   "id": "af25e0b57e55099e"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:54:41.576421Z",
     "start_time": "2025-07-08T07:54:41.573398Z"
    }
   },
   "cell_type": "code",
   "source": "k1.size(-1)",
   "id": "1d65e83e048296d6",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "execution_count": 76,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 76
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:54:58.525267Z",
     "start_time": "2025-07-08T07:54:58.521573Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Scale the scores by sqrt(d_k), where d_k is the key dimension, so the\n",
    "# softmax input does not grow with the vector size.\n",
    "# NOTE: this cell is not idempotent -- re-running it rescales the scores\n",
    "# again; always use Restart & Run All.\n",
    "dk = k1.size(-1)\n",
    "scale = dk ** 0.5  # hoisted loop-invariant; plain Python sqrt suffices\n",
    "attention_score_11 = attention_score_11 / scale\n",
    "attention_score_12 = attention_score_12 / scale\n",
    "attention_score_13 = attention_score_13 / scale\n",
    "attention_score_14 = attention_score_14 / scale"
   ],
   "id": "c189d45a3fa02496",
   "outputs": [],
   "execution_count": 77
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:55:01.002318Z",
     "start_time": "2025-07-08T07:55:00.998441Z"
    }
   },
   "cell_type": "code",
   "source": "attention_score_11, attention_score_12, attention_score_13, attention_score_14",
   "id": "f0d97068c5b92830",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor(4.7874), tensor(8.7342), tensor(12.6809), tensor(16.6277))"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 78
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "softmax注意力分数得到q1和k1、k2、k3、k4的注意力权重",
   "id": "8fced8f75cdd67d1"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:55:27.842548Z",
     "start_time": "2025-07-08T07:55:27.838831Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Softmax over the four scores gives q1's attention weights.\n",
    "# torch.stack is the correct way to combine existing tensors -- calling\n",
    "# torch.tensor() on a list of tensors emits a copy-construct UserWarning.\n",
    "attention_weight_q1 = torch.softmax(\n",
    "    torch.stack([attention_score_11, attention_score_12, attention_score_13, attention_score_14]), dim=0)\n",
    "\n",
    "attention_weight_q1"
   ],
   "id": "initial_id",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([7.0690e-06, 3.6594e-04, 1.8944e-02, 9.8068e-01])"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 79
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "注意力权重乘以v，得到q1的注意力输出，也是一个向量，输入的x1是一个词向量，输出的o1也是一个向量，是一个包含了注意力信息的向量",
   "id": "7b880810618e77b5"
  },
  {
   "cell_type": "code",
   "id": "a25fc85ac600088d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T07:56:15.839416Z",
     "start_time": "2025-07-08T07:56:15.835378Z"
    }
   },
   "source": [
    "# q1's attention output o1: the attention-weighted sum of the value\n",
    "# vectors -- one vector carrying context from all four tokens\n",
    "o1 = attention_weight_q1[0] * v1 + attention_weight_q1[1] * v2 + attention_weight_q1[2] * v3 + attention_weight_q1[\n",
    "    3] * v4\n",
    "o1"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([7.6371, 6.2040, 5.6893])"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 80
  },
  {
   "cell_type": "code",
   "id": "e757b75a0656bee9",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:16.829671Z",
     "start_time": "2025-07-08T08:10:16.826091Z"
    }
   },
   "source": [
    "# x is one input sequence: 4 tokens, each a 3-dim embedding\n",
    "x = torch.rand(4, 3)\n",
    "x"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.8676, 0.3806, 0.7488],\n",
       "        [0.4281, 0.2807, 0.4111],\n",
       "        [0.7204, 0.1757, 0.7430],\n",
       "        [0.4438, 0.3186, 0.8127]])"
      ]
     },
     "execution_count": 104,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 104
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:18.646932Z",
     "start_time": "2025-07-08T08:10:18.644353Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# wq, wk, wv are the weight matrices that produce queries/keys/values;\n",
    "# here they project the 3-dim embeddings into a 5-dim attention space\n",
    "wq = torch.rand(3, 5)\n",
    "wk = torch.rand(3, 5)\n",
    "wv = torch.rand(3, 5)"
   ],
   "id": "a1f7c73d0e6974ff",
   "outputs": [],
   "execution_count": 105
  },
  {
   "cell_type": "code",
   "id": "5e7ae3174d7b892",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:20.395839Z",
     "start_time": "2025-07-08T08:10:20.391607Z"
    }
   },
   "source": [
    "# Matrix form: one matmul projects the whole sequence at once,\n",
    "# replacing the per-token computation above\n",
    "q = torch.matmul(x, wq)\n",
    "k = torch.matmul(x, wk)\n",
    "v = torch.matmul(x, wv)\n",
    "q, k, v"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[0.6024, 0.8510, 0.9420, 1.0115, 0.9547],\n",
       "         [0.3446, 0.4839, 0.5289, 0.6024, 0.5216],\n",
       "         [0.5352, 0.7232, 0.7775, 0.7431, 0.8534],\n",
       "         [0.5880, 0.7507, 0.7546, 0.7756, 0.8341]]),\n",
       " tensor([[1.0095, 0.6294, 0.5370, 1.0863, 1.4507],\n",
       "         [0.5679, 0.3754, 0.3235, 0.6280, 0.7935],\n",
       "         [0.8376, 0.4398, 0.4392, 0.8453, 1.2114],\n",
       "         [0.8223, 0.4393, 0.5191, 0.8453, 1.0981]]),\n",
       " tensor([[1.4581, 0.4302, 1.2097, 1.2391, 1.5576],\n",
       "         [0.7631, 0.2405, 0.6888, 0.7100, 0.8673],\n",
       "         [1.2940, 0.3278, 1.0332, 0.9182, 1.2526],\n",
       "         [1.1068, 0.2844, 1.0820, 0.8428, 1.1486]]))"
      ]
     },
     "execution_count": 106,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 106
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:04:42.647349Z",
     "start_time": "2025-07-08T08:04:42.644872Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Hand-computed spot check of a single q-k dot product. The constants\n",
    "# were copied from an earlier run, so they do not match the current\n",
    "# outputs above.\n",
    "0.6986*0.5897+1.3881*1.4759+0.6278*1.2001"
   ],
   "id": "bad45ff5c20c6f64",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3.21408399"
      ]
     },
     "execution_count": 91,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 91
  },
  {
   "cell_type": "code",
   "id": "b51633457565bd19",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:25.320819Z",
     "start_time": "2025-07-08T08:10:25.317131Z"
    }
   },
   "source": [
    "# Row i of qk holds the scores of query qi against every key, so the\n",
    "# whole score matrix is computed in one matmul\n",
    "qk = torch.matmul(q, k.T)\n",
    "qk"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[4.1332, 2.3591, 3.3040, 3.2614],\n",
       "        [2.3474, 1.3407, 1.8748, 1.8524],\n",
       "        [3.4583, 1.9709, 2.7699, 2.7268],\n",
       "        [3.5239, 2.0089, 2.8202, 2.7766]])"
      ]
     },
     "execution_count": 107,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 107
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:27.447547Z",
     "start_time": "2025-07-08T08:10:27.444353Z"
    }
   },
   "cell_type": "code",
   "source": [
    "dk = k.size(-1)\n",
    "dk"
   ],
   "id": "1e163a7f6e376140",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "5"
      ]
     },
     "execution_count": 108,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 108
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:28.802319Z",
     "start_time": "2025-07-08T08:10:28.799802Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Scale by sqrt(dk), where dk is the key dimension; plain Python\n",
    "# arithmetic avoids building an intermediate tensor.\n",
    "# NOTE: this cell is not idempotent -- re-running it rescales qk again.\n",
    "qk = qk / (dk ** 0.5)"
   ],
   "id": "2170429eacfb5dbe",
   "outputs": [],
   "execution_count": 109
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:30.298178Z",
     "start_time": "2025-07-08T08:10:30.294364Z"
    }
   },
   "cell_type": "code",
   "source": "qk",
   "id": "db840bceb6cdf314",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.8484, 1.0550, 1.4776, 1.4586],\n",
       "        [1.0498, 0.5996, 0.8384, 0.8284],\n",
       "        [1.5466, 0.8814, 1.2387, 1.2195],\n",
       "        [1.5759, 0.8984, 1.2612, 1.2417]])"
      ]
     },
     "execution_count": 110,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 110
  },
  {
   "cell_type": "code",
   "id": "226835415b03094",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:32.103998Z",
     "start_time": "2025-07-08T08:10:32.099571Z"
    }
   },
   "source": [
    "# Softmax each row of the score matrix to get the attention weights\n",
    "attention_weight = torch.softmax(qk, dim=1)  # dim=1: softmax across each row\n",
    "attention_weight"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.3547, 0.1604, 0.2448, 0.2402],\n",
       "        [0.3078, 0.1962, 0.2492, 0.2467],\n",
       "        [0.3367, 0.1731, 0.2475, 0.2427],\n",
       "        [0.3385, 0.1719, 0.2471, 0.2424]])"
      ]
     },
     "execution_count": 111,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 111
  },
  {
   "cell_type": "code",
   "id": "2734e4e2305904d0",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:34.643364Z",
     "start_time": "2025-07-08T08:10:34.639717Z"
    }
   },
   "source": [
    "# Attention output: each row is a weighted sum of the value vectors\n",
    "o = torch.matmul(attention_weight, v)\n",
    "o"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.2221, 0.3397, 1.0523, 0.9805, 1.2740],\n",
       "        [1.1942, 0.3315, 1.0320, 0.9575, 1.2452],\n",
       "        [1.2119, 0.3366, 1.0449, 0.9719, 1.2633],\n",
       "        [1.2129, 0.3369, 1.0456, 0.9728, 1.2644]])"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 112
  },
  {
   "cell_type": "code",
   "id": "3fb6ef4008e773e7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:42.855224Z",
     "start_time": "2025-07-08T08:10:42.852465Z"
    }
   },
   "source": [
    "q.shape, k.shape, v.shape,"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([4, 5]), torch.Size([4, 5]), torch.Size([4, 5]))"
      ]
     },
     "execution_count": 113,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 113
  },
  {
   "cell_type": "code",
   "id": "f28db25d54456021",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:10:45.079008Z",
     "start_time": "2025-07-08T08:10:45.076220Z"
    }
   },
   "source": "x.shape, o.shape",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([4, 3]), torch.Size([4, 5]))"
      ]
     },
     "execution_count": 114,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 114
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:13:30.994456Z",
     "start_time": "2025-07-08T08:13:30.990049Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "\n",
    "class SelfAttention(nn.Module):\n",
    "    \"\"\"Single-head self-attention (Vaswani et al., 2017).\n",
    "\n",
    "    Projects the input to query/key/value spaces, computes scaled\n",
    "    dot-product attention, and projects the result to ``output_dim``.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, embed_dim: int, attn_dim: int, output_dim: int):\n",
    "        \"\"\"\n",
    "        embed_dim:  size of each input token embedding\n",
    "        attn_dim:   internal dimension of the q/k/v projections\n",
    "        output_dim: size of each output vector\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "\n",
    "        self.embed_dim = embed_dim\n",
    "        self.attn_dim = attn_dim\n",
    "        self.output_dim = output_dim\n",
    "\n",
    "        # Q/K/V projections: embed_dim -> attn_dim (no bias, as in the paper)\n",
    "        self.q_proj = nn.Linear(embed_dim, attn_dim, bias=False)  # y = Wx\n",
    "        self.k_proj = nn.Linear(embed_dim, attn_dim, bias=False)\n",
    "        self.v_proj = nn.Linear(embed_dim, attn_dim, bias=False)\n",
    "\n",
    "        # Output projection: attn_dim -> output_dim\n",
    "        self.out_proj = nn.Linear(attn_dim, output_dim, bias=False)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "        Input:  [batch_size, seq_len, embed_dim]\n",
    "        Output: [batch_size, seq_len, output_dim]\n",
    "        \"\"\"\n",
    "        # Project into q/k/v space: each is [batch_size, seq_len, attn_dim]\n",
    "        q = self.q_proj(x)\n",
    "        k = self.k_proj(x)\n",
    "        v = self.v_proj(x)\n",
    "\n",
    "        # Attention scores: q @ k^T -> [batch_size, seq_len, seq_len]\n",
    "        attn_scores = torch.matmul(q, k.transpose(-2, -1))\n",
    "\n",
    "        # Scale by sqrt(d_k) so the dot products do not grow with the key\n",
    "        # dimension; plain Python arithmetic avoids building an extra tensor\n",
    "        d_k = k.size(-1)\n",
    "        attn_scores = attn_scores / (d_k ** 0.5)\n",
    "\n",
    "        # Row-wise softmax turns scores into attention weights\n",
    "        attn_weights = torch.softmax(attn_scores, dim=-1)\n",
    "\n",
    "        # Weighted sum of the values: [batch_size, seq_len, attn_dim]\n",
    "        attn_out = torch.matmul(attn_weights, v)\n",
    "\n",
    "        # Project to the output space\n",
    "        return self.out_proj(attn_out)"
   ],
   "id": "382c25f1533dff13",
   "outputs": [],
   "execution_count": 115
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-08T08:22:02.164086Z",
     "start_time": "2025-07-08T08:22:02.160034Z"
    }
   },
   "cell_type": "code",
   "source": [
    "x = torch.rand(1, 4, 10)\n",
    "\n",
    "attn = SelfAttention(embed_dim=10, attn_dim=6, output_dim=8)\n",
    "out = attn(x)\n",
    "\n",
    "print(x.shape)\n",
    "print(out.shape)"
   ],
   "id": "ee6c2627993de8be",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 4, 10])\n",
      "torch.Size([1, 4, 8])\n"
     ]
    }
   ],
   "execution_count": 118
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
