{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "前面，一个i1只会对应一个q，一个k, 一个v，所以叫单头。\n",
    "\n",
    "而所谓多头，比如两头，就是一个i1会对应两个q，两个k，两个v，所以叫多头。"
   ],
   "id": "fb712db5ee1d4280"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "import torch\n",
    "\n",
    "torch.manual_seed(0)  # make the random demo reproducible across re-runs\n",
    "\n",
    "# i is one input sequence; i1..i4 are its 4 tokens, each a 3-dim embedding vector\n",
    "i1 = torch.rand(1, 3)\n",
    "i2 = torch.rand(1, 3)\n",
    "i3 = torch.rand(1, 3)\n",
    "i4 = torch.rand(1, 3)\n",
    "\n",
    "# Stack the tokens: i has shape (4, 3) -- 4 tokens, embedding size 3\n",
    "i = torch.cat([i1, i2, i3, i4], dim=0)\n",
    "\n",
    "# Per-head projection matrices: each head gets its own wq / wk / wv of shape (3, 2)\n",
    "wq_head1 = torch.rand(3, 2)\n",
    "wq_head2 = torch.rand(3, 2)\n",
    "wk_head1 = torch.rand(3, 2)\n",
    "wk_head2 = torch.rand(3, 2)\n",
    "wv_head1 = torch.rand(3, 2)\n",
    "wv_head2 = torch.rand(3, 2)\n",
    "\n",
    "# Output projection. The two (4, 2) head outputs are concatenated along the\n",
    "# feature axis into (4, 4), so wo must have num_heads * head_dim = 4 rows.\n",
    "wo = torch.rand(4, 2)\n",
    "\n",
    "q_head1 = torch.matmul(i, wq_head1)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "q_head2 = torch.matmul(i, wq_head2)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "k_head1 = torch.matmul(i, wk_head1)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "k_head2 = torch.matmul(i, wk_head2)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "v_head1 = torch.matmul(i, wv_head1)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "v_head2 = torch.matmul(i, wv_head2)  # (4, 3) @ (3, 2) -> (4, 2)\n",
    "\n",
    "# Head 1: raw scores (4, 2) @ (2, 4) -> (4, 4); softmax with dim=-1 normalizes\n",
    "# each row; weighted values (4, 4) @ (4, 2) -> (4, 2), one output row per token\n",
    "qk_head1 = torch.matmul(q_head1, k_head1.T)\n",
    "o_head1 = torch.matmul(torch.softmax(qk_head1, dim=-1), v_head1)\n",
    "\n",
    "# Head 2: same computation with head 2's q / k / v\n",
    "qk_head2 = torch.matmul(q_head2, k_head2.T)\n",
    "o_head2 = torch.matmul(torch.softmax(qk_head2, dim=-1), v_head2)\n",
    "\n",
    "print(o_head1)\n",
    "print(o_head2)\n",
    "\n",
    "# Merge the heads along the FEATURE axis (dim=1): (4, 2) ++ (4, 2) -> (4, 4).\n",
    "# dim=0 would stack the heads as 4 extra token rows and give a wrong (8, 2)\n",
    "# result; after the dim=1 concat, (4, 4) @ wo (4, 2) -> (4, 2), matching the\n",
    "# 4 input tokens.\n",
    "o = torch.matmul(torch.cat([o_head1, o_head2], dim=1), wo)\n",
    "o"
   ],
   "id": "18cc2965ef964b2",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T15:25:59.836853Z",
     "start_time": "2025-05-24T15:25:59.833039Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Queries of head 1: (4, 3) @ (3, 2) -> (4, 2), one 2-dim query per token\n",
    "q_head1"
   ],
   "id": "47da066e8fa3ab61",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0277, 0.4852],\n",
       "        [0.8772, 0.4506],\n",
       "        [0.5097, 0.2065],\n",
       "        [0.7879, 0.4787]])"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 64
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T15:25:59.897541Z",
     "start_time": "2025-05-24T15:25:59.894613Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Keys of head 1, transposed: (4, 2).T -> (2, 4), ready for q @ k.T\n",
    "k_head1.T"
   ],
   "id": "91d939df53107fd4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0057, 1.0469, 0.5064, 1.0779],\n",
       "        [0.6135, 0.5327, 0.4113, 0.3430]])"
      ]
     },
     "execution_count": 65,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 65
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T15:25:59.963174Z",
     "start_time": "2025-05-24T15:25:59.960355Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Raw attention scores of head 1, shape (4, 4): row r holds token r's scores\n",
    "# against every token, which is why softmax is applied with dim=-1 (row-wise)\n",
    "qk_head1"
   ],
   "id": "847b475459c46f1d",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.3312, 1.3344, 0.7200, 1.2741],\n",
       "        [1.1587, 1.1584, 0.6296, 1.1001],\n",
       "        [0.6392, 0.6435, 0.3430, 0.6201],\n",
       "        [1.0861, 1.0799, 0.5959, 1.0134]])"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 66
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T15:26:00.034065Z",
     "start_time": "2025-05-24T15:26:00.030380Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Row-wise softmax of the raw scores: each row now sums to 1 and gives the\n",
    "# attention weights for one query token\n",
    "torch.softmax(qk_head1, dim=-1)"
   ],
   "id": "ff0f9c16c79a9fcf",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.2865, 0.2874, 0.1555, 0.2706],\n",
       "        [0.2831, 0.2831, 0.1668, 0.2670],\n",
       "        [0.2682, 0.2693, 0.1994, 0.2631],\n",
       "        [0.2828, 0.2810, 0.1732, 0.2630]])"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 67
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "多头注意力就是增加了另外的q、k、v，从而可以从多个角度来捕捉token之间的相关性。",
   "id": "4605d9dfe0d35435"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-24T15:26:00.105163Z",
     "start_time": "2025-05-24T15:26:00.098599Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    \"\"\"Minimal multi-head self-attention over a 2-D input (seq_len, embed_dim).\n",
    "\n",
    "    q/k/v for all heads are produced by one fused linear layer each and split\n",
    "    into heads afterwards; the concatenated head outputs are projected back\n",
    "    to embed_dim by out_proj.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, embed_dim, head_dim, num_heads):\n",
    "        super().__init__()\n",
    "\n",
    "        self.num_heads = num_heads\n",
    "        self.head_dim = head_dim\n",
    "\n",
    "        # Fused linear projections covering all heads at once\n",
    "        self.q_proj = nn.Linear(embed_dim, head_dim * num_heads)   # weight shape (3, 4) with the demo config below\n",
    "        self.k_proj = nn.Linear(embed_dim, head_dim * num_heads)\n",
    "        self.v_proj = nn.Linear(embed_dim, head_dim * num_heads)\n",
    "        self.out_proj = nn.Linear(head_dim * num_heads, embed_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (seq_len, embed_dim) -> returns (seq_len, embed_dim)\n",
    "        seq_len, _ = x.shape\n",
    "\n",
    "        # q/k/v for every head in a single matmul each\n",
    "        q = self.q_proj(x)  # (4, 3) in -> (4, 4) out in the demo\n",
    "        k = self.k_proj(x)  # (4, 3) in -> (4, 4) out in the demo\n",
    "        v = self.v_proj(x)  # (4, 3) in -> (4, 4) out in the demo\n",
    "\n",
    "        # Split into heads:\n",
    "        # view() reshapes to (seq_len, num_heads, head_dim), i.e. (4, 2, 2);\n",
    "        # transpose(0, 1) swaps dims 0 and 1 to (num_heads, seq_len, head_dim),\n",
    "        # i.e. (2, 4, 2), so the head axis comes first and each head's q/k/v\n",
    "        # can be indexed directly\n",
    "        q = q.view(seq_len, self.num_heads, self.head_dim).transpose(0, 1)\n",
    "        k = k.view(seq_len, self.num_heads, self.head_dim).transpose(0, 1)\n",
    "        v = v.view(seq_len, self.num_heads, self.head_dim).transpose(0, 1)\n",
    "\n",
    "        # Attention scores [num_heads, seq_len, seq_len], (2, 4, 4) in the demo\n",
    "        scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)  # scaling keeps dot products from growing with head_dim\n",
    "\n",
    "        # Attention weights via row-wise softmax [num_heads, seq_len, seq_len]\n",
    "        attn_weights = F.softmax(scores, dim=-1)\n",
    "\n",
    "        # Weighted sum of values [num_heads, seq_len, head_dim], (2, 4, 2) in the demo\n",
    "        output = torch.matmul(attn_weights, v)\n",
    "\n",
    "        # Merge heads back:\n",
    "        # [num_heads, seq_len, head_dim] -> [seq_len, num_heads, head_dim];\n",
    "        # contiguous() is required before view() on a transposed tensor\n",
    "        output = output.transpose(0, 1).contiguous()\n",
    "\n",
    "        # [seq_len, num_heads, head_dim] -> [seq_len, num_heads * head_dim], i.e. (4, 2*2)\n",
    "        output = output.view(seq_len, -1)\n",
    "\n",
    "        # Final projection back to embed_dim\n",
    "        return self.out_proj(output)\n",
    "\n",
    "\n",
    "# Demo / smoke test\n",
    "if __name__ == \"__main__\":\n",
    "    # Input (seq_len=4, embed_dim=3)\n",
    "    x = torch.rand(4, 3)\n",
    "\n",
    "    # Multi-head attention module (embed_dim=3, head_dim=2, num_heads=2)\n",
    "    mha = MultiHeadAttention(embed_dim=3, head_dim=2, num_heads=2)\n",
    "\n",
    "    # Forward pass\n",
    "    output = mha(x)\n",
    "\n",
    "    print(\"Input shape:\", x.shape)\n",
    "    print(\"Output shape:\", output.shape)\n",
    "    print(\"Output:\\n\", output)"
   ],
   "id": "403c5a8fcae45aae",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input shape: torch.Size([4, 3])\n",
      "Output shape: torch.Size([4, 3])\n",
      "Output:\n",
      " tensor([[ 0.0167, -0.2603,  0.7324],\n",
      "        [ 0.0189, -0.2591,  0.7261],\n",
      "        [ 0.0202, -0.2588,  0.7211],\n",
      "        [ 0.0169, -0.2602,  0.7318]], grad_fn=<AddmmBackward0>)\n"
     ]
    }
   ],
   "execution_count": 68
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
