{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "###### attention 及其可视化\n",
    "* 理解多头"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Transformer的结构：\n",
    "* 多头缩放注意力机制\n",
    "* 残差连接和归一化层\n",
    "* 位置编码\n",
    "* softmax"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "d_k,d_v = 64,64\n",
    "# 缩放注意力机制：\n",
    "# 过程：点积、缩放、掩码、归一化、应用到V\n",
    "class ScaledDotProductAttention(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(ScaledDotProductAttention, self).__init__()\n",
    "    def forword(self,Q,K,V,attn_mask):\n",
    "        # Q,K,V [batch_size, head_num, len_q/k/v, dim_q]\n",
    "        # sorces [batch_size, head_num, len_q, len_k]\n",
    "        scorces = torch.matmul(Q,K.transpose(-1,-2))/np.sqrt(d_k) # 括号里表示要交换的维度. 缩放因子一般是特征纬度的方根\n",
    "        # 加入掩码：\n",
    "        # attn_mask [batch_size, n_head, len_q, len_k]，必须和score的尺寸一致\n",
    "        scorces.masked_fill(attn_mask, -1e9) # 后面表示对应替换的值\n",
    "        # 对分数进行归一化：\n",
    "        weights = nn.Softmax(dim=-1)(scorces) # 最里面是len_q*len_k的矩阵，每行表示Q里第一个元素对K里不同元素的“关注度”，对这个关注度归一化\n",
    "        # 用到V上：context [batch_size, head_num, len_q, dim_v]\n",
    "        context = torch.matmul(weights, V)\n",
    "        return context, weights\n",
    "        \n",
    " "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 多头注意力机制 + 残差连接 + NormLayer\n",
    "d_embedding = 512\n",
    "n_heads = 8\n",
    "batch_size = 3\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MultiHeadAttention, self).__init__()\n",
    "        # Q和K的特征纬度必须相同，K和V的seq_len必须相同\n",
    "        # 对QKV通过线性变换增加复杂度: 把每个词的向量映射到不同的向量空间，以便从不同的角度捕获不同的信息\n",
    "        self.W_Q = nn.Linear(d_embedding, d_k*n_heads)# 第一个参数是输入的维度，第二个是输出的味道\n",
    "        self.W_K = nn.Linear(d_embedding, d_k*n_heads)\n",
    "        self.W_V = nn.Linear(d_embedding, d_v*n_heads)\n",
    "        self.linear = nn.Linear(n_heads*d_v, d_embedding)\n",
    "        self.layernorm = nn.LayerNorm(d_embedding)\n",
    "    def forward(self,Q,K,V):\n",
    "        # Q,K,V的尺寸 [batch_size, len_q/k/v, embedding_dim]\n",
    "        # 存残差连接的部分\n",
    "        residual,batch_size = Q, Q.size(0)\n",
    "        # 对QKV进行线性变换，并分割为多头：\n",
    "        q_s = self.W_Q(Q).view(batch_size, -1, n_heads, d_k).transpose(1,2)\n",
    "        k_s = self.W_K(K).view(batch_size, -1, n_heads, d_k).transpose(1,2)\n",
    "        v_s = self.W_V(V).view(batch_size, -1, n_heads, d_v).transpose(1,2)\n",
    "        # 将注意力掩码复制到多头：\n",
    "        attn_mask = attn_mask.unsqueeze(1).repeat(1,n_heads,1,1) # unsqueeze()在指定位置插入一个维度，保证每个头得到的mask一致\n",
    "        # 进行注意力计算\n",
    "        context,weights = ScaledDotProductAttention()(q_s,k_s,v_s,attn_mask) # 这里不写函数名就可以直接调用？因为继承了类？\n",
    "        # 把结果进行拼接：\n",
    "        context = context.transpose(1,2).contiguous().view(batch_size,-1,n_heads*d_v)\n",
    "        # 线性层把输出结果转换为embedding维度：\n",
    "        output = self.linear(context)\n",
    "        # 残差连接和layernorm：\n",
    "        output = self.layernorm(output+residual)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "###### 我是这么理解多头的：\n",
    "###### like this:[batch_size, len_q, n_head*dim_k] 到 [batch_size, n_head, len_q, dim_k]，就是说把一个词的不同特征给拆开成了多个头看，一个头只关注一部分特征；\n",
    "###### therefore，所谓的交换维度就是把不同词的同一部分维度的向量部分放在一起了。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 理解一下多头：\n",
    "import torch\n",
    "origin = torch.tensor(\n",
    "    [\n",
    "        [[1,2,3,4],[2,2,2,2]]\n",
    "    ]\n",
    ")\n",
    "n_heads = 2\n",
    "batch_size = origin.size(0)\n",
    "d_k = 2\n",
    "\n",
    "trans = origin.view(batch_size,-1,n_heads,d_k)\n",
    "mulithead = trans.transpose(1,2)\n",
    "print(mulithead)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### FFN 前馈神经网络：\n",
    "* 没太理解作用是什么\n",
    "* 书里用卷积网络代替了原论文里的全连接神经网络，以减少参数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
    "outputs": [],
   "source": [
    "class PoswiseFeedForwardNet(nn.Module):\n",
    "    # 逐位置前馈神经网络：不打乱输入的序列信息且并行处理：transformer的核心：\n",
    "    def __init__(self,d_ff=2048):\n",
    "        super(PoswiseFeedForwardNet,self).__init__()\n",
    "        # 第一个卷积层：1维卷积，把得到的结果映射到高位空间，去进行更复杂的信息抽取：\n",
    "        self.conv1 = nn.Conv1d(in_channels = d_embedding,out_channels=d_ff,kernel_size=1) # 输出通道数即卷积核的数量\n",
    "        # 第二个卷积层：把维度恢复，降回我们的目标维度空间：\n",
    "        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_embedding,kernel_size=1)\n",
    "        # 归一化层：\n",
    "        self.layer_norm = nn.LayerNorm(d_embedding)\n",
    "    def forward(self,inputs):\n",
    "        # inputs是上一层的输出结果，[batch_size, len_q, d_embedding]\n",
    "        residual = inputs\n",
    "        # transpose后，d_embedding会被当成通道数喂入卷积网络：所以对每个维度特征进行了高维映射\n",
    "        # 一般卷积结果用relu()激活函数做处理（max(0,x)）\n",
    "        output = nn.ReLU()(self.conv1(inputs.transpose(1,2))) # 输出尺寸 [batch_size, len_q, d_ff=2048]\n",
    "        output = nn.ReLU()(self.conv2(output).tranpose(1,2)) # [batch_size, len_q ,d_embedding]\n",
    "        # 残差链接\n",
    "        output = self.layer_norm(output+residual) # # [batch_size, len_q ,d_embedding]\n",
    "        return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 还差两个组件：\n",
    "* 位置编码\n",
    "<br>$PE(pos, 2i) = sin(\\frac{pos} {10000^\\frac{2i}{d}})$\n",
    "<br>$PE(pos, 2i+1) = cos(\\frac{pos} {10000^\\frac{2i}{d}})$\n",
    "<br> pos表示的词向量在序列中的位置，i取值范围[0,embedding_dim/2-1]是特征维度的index\n",
    "* 生成掩码的函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
    "outputs": [],
   "source": [
    "# 位置编码函数：\n",
    "def get_sin_enc_table(n_position, embedding_dim):\n",
    "    # 位置编码是对每个embedding后的词的每个维度都进行的相对位置的编码：\n",
    "    # n_position: 输出的序列的最大长度（？len_q）；embedding_dim 词特征维度\n",
    "    sinusoid_table = np.zeros((n_position, embedding_dim))\n",
    "    for pos_i in range(n_position):\n",
    "        for j in range(embedding_dim):\n",
    "            angle = pos_i/ np.power(10000, 2*(j//2)/embedding_dim)\n",
    "            sinusoid_table[pos_i,j] = angle\n",
    "    # 计算sin和cos\n",
    "    sinusoid_table[:,0::2] = np.sin(sinusoid_table[:,0::2]) # 偶数维度计算sin\n",
    "    sinusoid_table[:,1::2] = np.cos(sinusoid_table[:,1::2]) # 奇数维度计算cos\n",
    "    return torch.FloatTensor(sinusoid_table)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_attn_mask(seq_q, seq_k):\n",
    "    # seq_q Query序列：[batch_size, len_q]; seq_k：Key序列, [batch_size, len_k]\n",
    "    batch_size,len_q = seq_q.size()\n",
    "    batch_size,len_k= seq_k.size()\n",
    "    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)\n",
    "    pad_attn_mask = pad_attn_mask.expand(batch_size,len_q,len_k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
