{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "<img src=\"Attention.png\" alt=\"Multi-head attention architecture diagram\" style=\"margin-left: auto; margin-right: auto; width:20%; height:auto; border-radius:10px;\">",
   "id": "6ae24562bf10e7b8"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T09:55:17.978893Z",
     "start_time": "2025-11-03T09:55:16.791707Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import math\n",
    "import torch\n",
    "from torch import nn\n",
    "\n",
    "d_model = 512\n",
    "n_head = 8\n",
    "\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    \"\"\"Multi-head scaled dot-product attention.\n",
    "\n",
    "    Projects query/key/value with separate linear layers, splits the model\n",
    "    dimension into `n_head` heads, applies scaled dot-product attention per\n",
    "    head, then merges the heads and applies an output projection.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, d_model, n_head):\n",
    "        super().__init__()\n",
    "        # Guard against silent truncation in d_model // n_head below.\n",
    "        assert d_model % n_head == 0, \"d_model must be divisible by n_head\"\n",
    "        self.d_model = d_model\n",
    "        self.n_head = n_head\n",
    "        self.w_q = nn.Linear(d_model, d_model)\n",
    "        self.w_k = nn.Linear(d_model, d_model)\n",
    "        self.w_v = nn.Linear(d_model, d_model)\n",
    "        self.w_combine = nn.Linear(d_model, d_model)\n",
    "        self.softmax = nn.Softmax(dim=-1)\n",
    "\n",
    "    def forward(self, query, key, value, mask=None):\n",
    "        \"\"\"Compute attention.\n",
    "\n",
    "        query: (batch, q_len, d_model); key/value: (batch, kv_len, d_model).\n",
    "        mask: optional tensor broadcastable to (batch, n_head, q_len, kv_len);\n",
    "        positions where mask == 0 are excluded from attention.\n",
    "        Returns a tensor of shape (batch, q_len, d_model).\n",
    "        \"\"\"\n",
    "        batch, q_len, _ = query.shape\n",
    "        _, k_len, _ = key.shape\n",
    "        _, v_len, _ = value.shape\n",
    "        n_d = self.d_model // self.n_head  # per-head dimension\n",
    "        q = self.w_q(query)\n",
    "        k = self.w_k(key)\n",
    "        v = self.w_v(value)\n",
    "        # Split heads: (batch, len, d_model) -> (batch, n_head, len, n_d)\n",
    "        q = q.view(batch, q_len, self.n_head, n_d).permute(0, 2, 1, 3)\n",
    "        k = k.view(batch, k_len, self.n_head, n_d).permute(0, 2, 1, 3)\n",
    "        v = v.view(batch, v_len, self.n_head, n_d).permute(0, 2, 1, 3)\n",
    "        # Scaled dot-product scores: (batch, n_head, q_len, k_len)\n",
    "        score = q @ k.transpose(2, 3) / math.sqrt(n_d)\n",
    "        if mask is not None:\n",
    "            score = score.masked_fill(mask == 0, -1e9)\n",
    "        score = self.softmax(score) @ v\n",
    "        # Merge heads: (batch, n_head, q_len, n_d) -> (batch, q_len, d_model)\n",
    "        score = score.permute(0, 2, 1, 3).contiguous().view(batch, q_len, self.d_model)\n",
    "        return self.w_combine(score)\n",
    "\n",
    "\n",
    "attention = MultiHeadAttention(d_model, n_head)\n"
   ],
   "id": "967bdec585bbdbb0",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T09:55:19.035676Z",
     "start_time": "2025-11-03T09:55:18.962167Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sanity check: random Q/K/V of shape (batch=1, seq=512, d_model=512)\n",
    "query = torch.randn(1, 512, 512)\n",
    "key = torch.randn(1, 512, 512)\n",
    "value = torch.randn(1, 512, 512)\n",
    "out = attention(query, key, value)\n",
    "print(out, out.shape)"
   ],
   "id": "a7b20a0e1b38da8b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 8, 512, 64]) torch.Size([1, 8, 512, 64]) torch.Size([1, 8, 512, 64])\n",
      "tensor([[[ 0.0148, -0.0549, -0.0039,  ..., -0.0478,  0.0052,  0.0131],\n",
      "         [ 0.0215, -0.0598, -0.0054,  ..., -0.0547,  0.0022,  0.0115],\n",
      "         [ 0.0166, -0.0555, -0.0009,  ..., -0.0562,  0.0022,  0.0141],\n",
      "         ...,\n",
      "         [ 0.0172, -0.0581,  0.0002,  ..., -0.0579, -0.0021,  0.0103],\n",
      "         [ 0.0185, -0.0551, -0.0065,  ..., -0.0537,  0.0003,  0.0148],\n",
      "         [ 0.0157, -0.0610, -0.0027,  ..., -0.0552,  0.0065,  0.0121]]],\n",
      "       grad_fn=<ViewBackward0>) torch.Size([1, 512, 512])\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-11-03T09:20:16.215008Z",
     "start_time": "2025-11-03T09:20:16.209735Z"
    }
   },
   "cell_type": "code",
   "source": "print(torch.randn(1, 2, 3))",
   "id": "c29bb12836799ab9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[ 1.8608, -0.5740, -0.3080],\n",
      "         [-0.2690, -0.1159, -0.0652]]])\n"
     ]
    }
   ],
   "execution_count": 3
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
