{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-17T13:19:24.796995Z",
     "start_time": "2024-11-17T13:19:24.765523Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# nn.Linear() \n",
    "# 参考：https://baijiahao.baidu.com/s?id=1769183854487564717&wfr=spider&for=pc\n",
    "# nn.Linear 是PyTorch中的一个类，用于定义线性变换（全连接层）。它是神经网络中常用的一种层类型，主要用于实现输入张量与权重矩阵之间的线性变换。\n",
    "# \n",
    "# 原理和功能\n",
     "# nn.Linear表示的是线性变换，原型是初级数学中学到的线性函数y=kx+b。在深度学习中，变量都是多维张量，乘法就是矩阵乘法，加法就是矩阵加法。因此，nn.Linear()运行的计算是 output = input @ weight.T + bias（即 y = xA^T + b），其中input表示输入的Tensor，weight表示可学习的权重，bias表示可学习的偏置。\n",
    "# \n",
    "# 参数和使用方法\n",
    "# nn.Linear的构造函数为nn.Linear(in_features, out_features, bias=True)：\n",
    "# \n",
    "# in_features：输入特征的大小，即输入张量的最后一维大小。\n",
    "# out_features：输出特征的大小，即输出张量的最后一维大小。\n",
    "# bias：是否使用偏置项，默认为True，表示使用偏置项。\n",
    "# 例如：\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "linear_layer = nn.Linear(in_features=3, out_features=2).to(torch.float32)\n",
    "input_tensor = torch.tensor([[[1, 2, 3], [4, 5, 6]], \n",
    "                             [[1, 2, 3], [4, 5, 6]]]).to(torch.float32)\n",
    "output = linear_layer(input_tensor)\n",
    "print(output.shape)  # torch.Size([2, 2, 2])\n"
   ],
   "id": "24c32b4506a08575",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 2, 2])\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-11-26T01:52:40.119942Z",
     "start_time": "2024-11-26T01:52:40.044177400Z"
    }
   },
   "source": [
    "# 单头注意力\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from typing import List\n",
    "\n",
    "def get_input_embeddings(words: List[str], embeddings_dim: int):\n",
    "    # we are creating random vector of embeddings_dim size for each words\n",
    "    # normally we train a tokenizer to get the embeddings.\n",
    "    # check the blog on tokenizer to learn about this part\n",
    "    embeddings = [torch.randn(embeddings_dim) for word in words]\n",
    "    return embeddings\n",
    "\n",
    "\n",
    "text = \"I should sleep now\"\n",
    "words = text.split(\" \")\n",
    "print(f'len(words)={len(words)}')   # 4\n",
    "\n",
    "\n",
    "\n",
    "embeddings_dim = 512 # 512 dim because the original paper uses it. we can use other dim also\n",
    "embeddings = get_input_embeddings(words, embeddings_dim=embeddings_dim)\n",
    "print(f'len(embeddings)={len(embeddings)}') # [4,512]\n",
    "print(f'embeddings[0].shape={embeddings[0].shape}') # torch.Size([512])\n",
    "\n",
    "\n",
    "# initialize the query, key and value metrices\n",
    "query_matrix = nn.Linear(embeddings_dim, embeddings_dim)  # 线性层，模拟 input * Wq，其中Wq.shape=[embeddings_dim, embeddings_dim]\n",
    "key_matrix = nn.Linear(embeddings_dim, embeddings_dim)\n",
    "value_matrix = nn.Linear(embeddings_dim, embeddings_dim)\n",
    "print(f'query_matrix.weight.shape={query_matrix.weight.shape}')  # torch.Size([512, 512])\n",
    "print(f'key_matrix.weight.shape={key_matrix.weight.shape}')      # torch.Size([512, 512])\n",
    "print(f'value_matrix.weight.shape={value_matrix.weight.shape}')  # torch.Size([512, 512])\n",
    "\n",
    "\n",
    "# embeddings是一个list，每个元素是一个tensor，shape=[512]，表示一个word\n",
     "# 如下，query_matrix(embedding)表示 word * Wq（[512] * [512,512]），得到Q_word（[512]）\n",
     "# 然后，torch.stack将句子中的每个word拼接起来，得到Q（[4,512]）\n",
    "# 下面的 query_vectors、key_vectors、value_vectors 就是自注意力中的三个著名矩阵 Q、K、V\n",
    "query_vectors = torch.stack([query_matrix(embedding) for embedding in embeddings])  # torch.Size([4, 512])\n",
    "key_vectors = torch.stack([key_matrix(embedding) for embedding in embeddings])      # torch.Size([4, 512])\n",
    "value_vectors = torch.stack([value_matrix(embedding) for embedding in embeddings])  # torch.Size([4, 512])\n",
    "print(f'query_vectors.shape={query_vectors.shape}')  # torch.Size([4, 512])\n",
    "print(f'key_vectors.shape={key_vectors.shape}')      # torch.Size([4, 512])\n",
    "print(f'value_vectors.shape={value_vectors.shape}')  # torch.Size([4, 512])\n",
    "\n",
    "\n",
     "# 计算注意力分数，即 (Q * transpose(K)) / 根号(embeddings_dim)，形状为 ([4, 512] * [512, 4]) / 常数 = [4, 4]\n",
    "scores = torch.matmul(query_vectors, key_vectors.transpose(-2, -1)) / torch.sqrt(torch.tensor(embeddings_dim, dtype=torch.float32))\n",
    "print(f'scores.shape={scores.shape}') # torch.Size([4, 4])\n",
    "\n",
    "\n",
    "# 对分数矩阵按行进行softmax计算，按行softmax\n",
    "softmax = nn.Softmax(dim=-1)\n",
    "attention_weights = softmax(scores)\n",
    "print(f'attention_weights.shape={attention_weights.shape}') # torch.Size([4, 4])\n",
    "\n",
    "\n",
    "# softmax的结果与V矩阵乘，得到注意力输出\n",
    "output = torch.matmul(attention_weights, value_vectors)\n",
    "print(f'output.shape={output.shape}') # torch.Size([4, 512])\n",
    "\n",
     "# 最终计算出来的注意力输出shape为 [seq_len, embedding_dim]（本例未加batch维，即[4, 512]）\n",
    "# 以上代码只是为了展示注意力机制的实现，并未优化。"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len(words)=4\n",
      "len(embeddings)=4\n",
      "embeddings[0].shape=torch.Size([512])\n",
      "query_matrix.weight.shape=torch.Size([512, 512])\n",
      "key_matrix.weight.shape=torch.Size([512, 512])\n",
      "value_matrix.weight.shape=torch.Size([512, 512])\n",
      "query_vectors.shape=torch.Size([4, 512])\n",
      "key_vectors.shape=torch.Size([4, 512])\n",
      "value_vectors.shape=torch.Size([4, 512])\n",
      "scores.shape=torch.Size([4, 4])\n",
      "attention_weights.shape=torch.Size([4, 4])\n",
      "output.shape=torch.Size([4, 512])\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-17T14:43:17.680317Z",
     "start_time": "2024-11-17T14:43:17.559883Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# 多头注意力\n",
    "# 上面提到的注意力是单头注意力，在原论文中有8个头。对于多头和单多头注意力计算相同，只是查询(q0-q3)，键(k0-k3)，值(v0-v3)中间向量会有一些区别。\n",
    "# \n",
    "# \n",
    "# 之后将查询向量分成相等的部分（有多少头就分成多少）。在上图中有8个头，查询，键和值向量的维度为512。所以就变为了8个64维的向量。\n",
    "# \n",
    "# 把前64个向量放到第一个头，第二组向量放到第二个头，以此类推。在上面的图片中，我只展示了第一个头的计算。\n",
    "# \n",
    "# 这里需要注意的是：不同的框架有不同的实现方法，pytorch官方的实现是上面这种，但是tf和一些第三方的代码中是将每个头分开计算了，比如8个头会使用8个linear（tf的dense）而不是一个大linear再拆解。还记得Pytorch的transformer里面要求emb_dim能被num_heads整除吗，就是因为这个\n",
    "# \n",
    "# 使用哪种方式都可以，因为最终的结果都类似影响不大。\n",
    "# \n",
    "# 当我们在一个head中有了小查询、键和值(64 dim的)之后，计算剩下的逻辑与单个head注意相同。最后得到的64维的向量来自每个头。\n",
    "# \n",
    "# 我们将每个头的64个输出组合起来，得到最后的512个dim输出向量。\n",
    "# \n",
    "# \n",
    "# 多头注意力可以表示数据中的复杂关系。每个头都能学习不同的模式。多个头还提供了同时处理输入表示的不同子空间(本例：64个向量表示512个原始向量)的能力。\n",
    "# 多头注意代码实现\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from typing import List\n",
    "\n",
    "def get_input_embeddings(words: List[str], embeddings_dim: int):\n",
    "    # we are creating random vector of embeddings_dim size for each words\n",
    "    # normally we train a tokenizer to get the embeddings.\n",
    "    # check the blog on tokenizer to learn about this part\n",
    "    embeddings = [torch.randn(embeddings_dim) for word in words]\n",
    "    return embeddings\n",
    "\n",
    "\n",
    "num_heads = 8\n",
    "# batch dim is 1 since we are processing one text.\n",
    "batch_size = 1\n",
    "\n",
    "text = \"I should sleep now\"\n",
    "words = text.split(\" \")\n",
    "print(f'len(words)={len(words)}') # 4\n",
    "\n",
    "\n",
    "embeddings_dim = 512\n",
    "embeddings = get_input_embeddings(words, embeddings_dim=embeddings_dim)\n",
    "print(f'embeddings[0].shape={embeddings[0].shape}') # torch.Size([512])\n",
    "\n",
    "\n",
    "# initialize the query, key and value metrices\n",
    "query_matrix = nn.Linear(embeddings_dim, embeddings_dim)\n",
    "key_matrix = nn.Linear(embeddings_dim, embeddings_dim)\n",
    "value_matrix = nn.Linear(embeddings_dim, embeddings_dim)\n",
    "# query_matrix.weight.shape, key_matrix.weight.shape, value_matrix.weight.shape # torch.Size([512, 512]), torch.Size([512, 512]), torch.Size([512, 512])\n",
    "\n",
    "\n",
    "# query, key and value vectors computation for each words embeddings\n",
    "query_vectors = torch.stack([query_matrix(embedding) for embedding in embeddings])\n",
    "key_vectors = torch.stack([key_matrix(embedding) for embedding in embeddings])\n",
    "value_vectors = torch.stack([value_matrix(embedding) for embedding in embeddings])\n",
    "# query_vectors.shape, key_vectors.shape, value_vectors.shape # torch.Size([4, 512]), torch.Size([4, 512]), torch.Size([4, 512])\n",
    "\n",
    "# 上面和单头都一样\n",
    "# 此处开始，多头与单头不一样了\n",
    "# Q、K、V的shape由 [1, 4, 512] 变为 [1, 8, 4, 64]，即 [batch_size, seq_len, embedding_dim] 变为 [batch_size, heads, seq_len, embedding_dim/heads]，即 BSH -> BNSD\n",
    "# (batch_size, num_heads, seq_len, embeddings_dim)\n",
    "query_vectors_view = query_vectors.view(batch_size, -1, num_heads, embeddings_dim//num_heads).transpose(1, 2)\n",
    "key_vectors_view = key_vectors.view(batch_size, -1, num_heads, embeddings_dim//num_heads).transpose(1, 2)\n",
    "value_vectors_view = value_vectors.view(batch_size, -1, num_heads, embeddings_dim//num_heads).transpose(1, 2)\n",
    "query_vectors_view.shape, key_vectors_view.shape, value_vectors_view.shape\n",
    "# torch.Size([1, 8, 4, 64]),\n",
    "# torch.Size([1, 8, 4, 64]),\n",
    "# torch.Size([1, 8, 4, 64])\n",
    "\n",
    "\n",
    "# 如下，只取出第0个头 >>>>>>>>>>>>>>>>>>>>>>>>>> 开始 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
    "# We are splitting the each vectors into 8 heads.\n",
    "# Assuming we have one text (batch size of 1), So we split\n",
    "# the embedding vectors also into 8 parts. Each head will\n",
    "# take these parts. If we do this one head at a time.\n",
    "head1_query_vector = query_vectors_view[0, 0, ...]\n",
    "head1_key_vector = key_vectors_view[0, 0, ...]\n",
    "head1_value_vector = value_vectors_view[0, 0, ...]\n",
    "print(f'head1_query_vector.shape={head1_query_vector.shape}')  # torch.Size([4, 64])\n",
    "print(f'head1_key_vector.shape={head1_key_vector.shape}')      # torch.Size([4, 64])\n",
    "print(f'head1_value_vector.shape={head1_value_vector.shape}')  # torch.Size([4, 64])\n",
    "\n",
    "\n",
    "# The above vectors are of same size as before only the feature dim is changed from 512 to 64\n",
    "# compute the score\n",
    "# head1_query_vector.shape = [4,64]\n",
    "scores_head1 = torch.matmul(head1_query_vector, head1_key_vector.permute(1, 0)) / torch.sqrt(torch.tensor(embeddings_dim//num_heads, dtype=torch.float32))\n",
    "print(f'scores_head1.shape={scores_head1.shape}') # torch.Size([4, 4])\n",
    "\n",
    "\n",
    "# compute the attention weights for each of the words with the other words\n",
    "softmax = nn.Softmax(dim=-1)\n",
    "attention_weights_head1 = softmax(scores_head1)\n",
    "print(f'attention_weights_head1.shape={attention_weights_head1.shape}') # torch.Size([4, 4])\n",
    "\n",
    "output_head1 = torch.matmul(attention_weights_head1, head1_value_vector)\n",
     "print(f'output_head1.shape={output_head1.shape}') # torch.Size([4, 64])\n",
    "# 如上，只取出第0个头 <<<<<<<<<<<<<<<<<<<<<<<<<< 结束 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
    "\n",
    "\n",
    "# we can compute the output for all the heads\n",
    "outputs = []\n",
    "for head_idx in range(num_heads):\n",
    "    head_idx_query_vector = query_vectors_view[0, head_idx, ...]\n",
    "    head_idx_key_vector = key_vectors_view[0, head_idx, ...]\n",
    "    head_idx_value_vector = value_vectors_view[0, head_idx, ...]\n",
    "    \n",
    "    print(f'\\nfor-{head_idx}-head{head_idx}_query_vector.shape={head_idx_query_vector.shape}')  # torch.Size([4, 64])\n",
    "    print(f'for-{head_idx}-head{head_idx}_key_vector.shape={head_idx_key_vector.shape}')  # torch.Size([4, 64])\n",
    "    print(f'for-{head_idx}-head{head_idx}_value_vector.shape={head_idx_value_vector.shape}')  # torch.Size([4, 64])\n",
    "    \n",
    "    scores_head_idx = torch.matmul(head_idx_query_vector, head_idx_key_vector.permute(1, 0)) / torch.sqrt(torch.tensor(embeddings_dim//num_heads, dtype=torch.float32))\n",
    "\n",
    "    softmax = nn.Softmax(dim=-1)\n",
    "    attention_weights_idx = softmax(scores_head_idx)\n",
    "    output = torch.matmul(attention_weights_idx, head_idx_value_vector)\n",
    "    outputs.append(output)\n",
    "\n",
    "print(f'[out.shape for out in outputs]={[out.shape for out in outputs]}')\n",
    "# [torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64]),\n",
    "# torch.Size([4, 64])]\n",
    "\n",
    "# stack the result from each heads for the corresponding words\n",
    "# 每个头的注意力结果中，取出第0个word，然后拼起来，就得到了第0个word的最终注意力结果\n",
     "word0_outputs = torch.cat([out[0] for out in outputs])  # out[0].shape = [64]\n",
    "print(f'word0_outputs.shape={word0_outputs.shape}')  # torch.Size([512])\n",
    "\n",
    "# lets do it for all the words\n",
    "attn_outputs = []\n",
    "for i in range(len(words)):\n",
    "    attn_output = torch.cat([out[i] for out in outputs])\n",
    "    attn_outputs.append(attn_output)\n",
    "[attn_output.shape for attn_output in attn_outputs] # [torch.Size([512]), torch.Size([512]), torch.Size([512]), torch.Size([512])]\n",
    "\n",
    "\n",
    "# 上面是分步计算的，下面是一起计算的\n",
    "# Now lets do it in vectorize way.\n",
     "# We can now permute the last two dimensions of the key vector.\n",
    "key_vectors_view.permute(0, 1, 3, 2).shape # torch.Size([1, 8, 64, 4])\n",
    "\n",
    "\n",
    "# Transpose the key vector on the last dim\n",
    "score = torch.matmul(query_vectors_view, key_vectors_view.permute(0, 1, 3, 2)) # Q*K   [1,8,4,4]  [B,N,S,D] * [B,N,D,S]  B:batch_size,N:heads,S:seq_len,D:dim\n",
    "score = torch.softmax(score, dim=-1)\n",
    "\n",
    "\n",
    "# reshape the results\n",
    "attention_results = torch.matmul(score, value_vectors_view)\n",
    "attention_results.shape # [1, 8, 4, 64]\n",
    "\n",
    "# merge the results\n",
    "attention_results = attention_results.permute(0, 2, 1, 3).contiguous().view(batch_size, -1, embeddings_dim)  # [B,S,N*D]\n",
    "attention_results.shape # torch.Size([1, 4, 512])"
   ],
   "id": "51c6da917a89274a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len(words)=4\n",
      "embeddings[0].shape=torch.Size([512])\n",
      "head1_query_vector.shape=torch.Size([4, 64])\n",
      "head1_key_vector.shape=torch.Size([4, 64])\n",
      "head1_value_vector.shape=torch.Size([4, 64])\n",
      "scores_head1.shape=torch.Size([4, 4])\n",
      "attention_weights_head1.shape=torch.Size([4, 4])\n",
      "output_head1.shape=torch.Size([4, 64])\n",
      "\n",
      "for-0-head0_query_vector.shape=torch.Size([4, 64])\n",
      "for-0-head0_key_vector.shape=torch.Size([4, 64])\n",
      "for-0-head0_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-1-head1_query_vector.shape=torch.Size([4, 64])\n",
      "for-1-head1_key_vector.shape=torch.Size([4, 64])\n",
      "for-1-head1_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-2-head2_query_vector.shape=torch.Size([4, 64])\n",
      "for-2-head2_key_vector.shape=torch.Size([4, 64])\n",
      "for-2-head2_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-3-head3_query_vector.shape=torch.Size([4, 64])\n",
      "for-3-head3_key_vector.shape=torch.Size([4, 64])\n",
      "for-3-head3_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-4-head4_query_vector.shape=torch.Size([4, 64])\n",
      "for-4-head4_key_vector.shape=torch.Size([4, 64])\n",
      "for-4-head4_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-5-head5_query_vector.shape=torch.Size([4, 64])\n",
      "for-5-head5_key_vector.shape=torch.Size([4, 64])\n",
      "for-5-head5_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-6-head6_query_vector.shape=torch.Size([4, 64])\n",
      "for-6-head6_key_vector.shape=torch.Size([4, 64])\n",
      "for-6-head6_value_vector.shape=torch.Size([4, 64])\n",
      "\n",
      "for-7-head7_query_vector.shape=torch.Size([4, 64])\n",
      "for-7-head7_key_vector.shape=torch.Size([4, 64])\n",
      "for-7-head7_value_vector.shape=torch.Size([4, 64])\n",
      "[out.shape for out in outputs]=[torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64]), torch.Size([4, 64])]\n",
      "word0_outputs.shape=torch.Size([512])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "torch.Size([1, 4, 512])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 14
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "# https://zhuanlan.zhihu.com/p/679950971\n",
    "# 多头注意力机制经典pytorch实现\n",
    "\n",
    "import math\n",
    "from torch import nn\n",
    "import torch\n",
    "from torch.nn import functional as F\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    def __init__(self, heads, d_model, dropout=0.1):\n",
    "        super().__init__()\n",
    "        self.d_model = d_model  # 模型的维度\n",
    "        self.d_k = d_model // heads  # 每个头的维度\n",
    "        self.h = heads  # 头的数量\n",
    "\n",
    "        # 以下三个是线性层，用于处理Q（Query），K（Key），V（Value）\n",
    "        self.q_linear = nn.Linear(d_model, d_model)\n",
    "        self.v_linear = nn.Linear(d_model, d_model)\n",
    "        self.k_linear = nn.Linear(d_model, d_model)\n",
    "\n",
    "        self.dropout = nn.Dropout(dropout)  # Dropout层\n",
    "        self.out = nn.Linear(d_model, d_model)  # 输出层\n",
    "\n",
    "    def attention(self, q, k, v, d_k, mask=None, dropout=None):\n",
    "        # torch.matmul是矩阵乘法，用于计算query和key的相似度\n",
    "        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)\n",
    "\n",
    "        if mask is not None:\n",
     "            mask = mask.unsqueeze(1)  # 在维度1处增加一个维度，以便在head维上广播\n",
    "            scores = scores.masked_fill(mask == 0, -1e9)  # 使用mask将不需要关注的位置设置为一个非常小的数\n",
    "\n",
    "        # 对最后一个维度进行softmax运算，得到权重\n",
    "        scores = F.softmax(scores, dim=-1)\n",
    "\n",
    "        if dropout is not None:\n",
    "            scores = dropout(scores)  # 应用dropout\n",
    "\n",
    "        output = torch.matmul(scores, v)  # 将权重应用到value上\n",
    "        return output\n",
    "\n",
    "    def forward(self, q, k, v, mask=None):\n",
    "        bs = q.size(0)  # 获取batch_size\n",
    "\n",
    "        # 将Q, K, V通过线性层处理，然后分割成多个头\n",
    "        k = self.k_linear(k).view(bs, -1, self.h, self.d_k)  # [B,S,N,D]\n",
    "        q = self.q_linear(q).view(bs, -1, self.h, self.d_k)\n",
    "        v = self.v_linear(v).view(bs, -1, self.h, self.d_k)\n",
    "\n",
     "        # 转置来获取维度为bs * h * sl * d_k的张量\n",
    "        k = k.transpose(1, 2)  # [B,N,S,D]\n",
    "        q = q.transpose(1, 2)\n",
    "        v = v.transpose(1, 2)\n",
    "\n",
    "        # 调用attention函数计算输出\n",
    "        scores = self.attention(q, k, v, self.d_k, mask, self.dropout)\n",
    "\n",
    "        # 重新调整张量的形状，并通过最后一个线性层\n",
    "        concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)\n",
    "\n",
    "        output = self.out(concat)  # 最终输出\n",
    "        return output"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-11-18T06:54:08.564463200Z",
     "start_time": "2024-11-18T06:54:07.064981300Z"
    }
   },
   "id": "e4e19a1769102bb3"
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output.shape:  torch.Size([5, 10, 20])\n"
     ]
    }
   ],
   "source": [
    "# 多头注意力机制是一种用于处理序列数据的神经网络结构，在自然语言处理领域中得到广泛应用。它可以帮助模型更好地理解和学习输入序列中的信息，提高模型在各种任务上的性能。\n",
    "# \n",
    "# 多头注意力机制是基于注意力机制的改进版本，它引入了多个注意力头，每个头都可以关注输入序列中不同位置的信息。通过汇总多个头的输出，模型可以更全面地捕捉输入序列中的特征。\n",
    "# \n",
    "# 下面我们用一个简单的例子来演示如何使用python实现多头注意力机制。我们将使用pytorch框架来构建模型。\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    def __init__(self, d_model, num_heads):\n",
    "        super(MultiHeadAttention, self).__init__()\n",
    "        self.num_heads = num_heads\n",
    "        self.d_model = d_model\n",
    "        self.query_linear = nn.Linear(d_model, d_model)\n",
    "        self.key_linear = nn.Linear(d_model, d_model)\n",
    "        self.value_linear = nn.Linear(d_model, d_model)\n",
    "        self.output_linear = nn.Linear(d_model, d_model)\n",
    "    def forward(self, query, key, value):\n",
    "        batch_size = query.size(0)\n",
    "        query = self.query_linear(query)\n",
    "        key = self.key_linear(key)\n",
    "        value = self.value_linear(value)\n",
    "        query = query.view(batch_size, -1, self.num_heads, self.d_model// self.num_heads).transpose(1,2)\n",
    "        key = key.view(batch_size, -1, self.num_heads, self.d_model // self.num_heads).transpose(1,2)\n",
    "        value = value.view(batch_size, -1, self.num_heads, self.d_model // self.num_heads).transpose(1,2)\n",
    "        scores = torch.matmul(query, key.transpose(-2, -1)) / (self.d_model // self.num_heads) ** 0.5\n",
    "        attention_weights = F.softmax(scores, dim = -1)\n",
    "        output = torch.matmul(attention_weights, value)\n",
    "        output = output.transpose(1,2).contiguous().view(batch_size, -1, self.d_model)\n",
    "        return self.output_linear(output)\n",
    "if __name__ == \"__main__\":\n",
    "    query = torch.randn(5,10,20)\n",
    "    key = torch.randn(5,10,20)\n",
    "    value = torch.randn(5,10,20)\n",
    "    multi_head_attention = MultiHeadAttention(d_model = 20, num_heads = 4)\n",
    "    output = multi_head_attention(query, key, value)\n",
    "    print(\"output.shape: \", output.shape)  # torch.Size([5, 10, 20])\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-11-26T07:49:45.338445700Z",
     "start_time": "2024-11-26T07:49:45.315694500Z"
    }
   },
   "id": "383b846aba529f49"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "8da41e9e46a4b8f7"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
