{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2cc260ae-0572-40ca-9da1-7bef390b9585",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import random\n",
    "import transformers\n",
    "import torch\n",
    "from torch import nn\n",
    "import math\n",
    "from typing import List\n",
    "from transformers.models.qwen2 import (\n",
    "    Qwen2TokenizerFast,\n",
    "    Qwen2ForCausalLM,\n",
    "    Qwen2Config,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b3e55018-1115-4df0-82fb-eeb69a9b8adc",
   "metadata": {},
   "source": [
    "# Part1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ffac1340-6c67-4ad5-9fb3-7212e39391db",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
     ]
    }
   ],
   "source": [
    "# Load the model, tokenizer and config once; the other Parts reuse these globals.\n",
    "\n",
    "# Hugging Face model id to load.\n",
    "model_name = \"Qwen/Qwen2-0.5B-Instruct\"\n",
    "\n",
    "# Model weights; torch_dtype=\"auto\" keeps the checkpoint's own dtype.\n",
    "model = Qwen2ForCausalLM.from_pretrained(model_name, torch_dtype=\"auto\")\n",
    "\n",
    "# Tokenizer matching the model, used to encode/decode text.\n",
    "tokenizer = Qwen2TokenizerFast.from_pretrained(model_name)\n",
    "\n",
    "# Model configuration (hyper-parameters and architecture info).\n",
    "config = Qwen2Config.from_pretrained(model_name)\n",
    "\n",
    "# Key architecture parameters used throughout the notebook\n",
    "# (values in comments are for Qwen2-0.5B-Instruct).\n",
    "num_attention_heads = config.num_attention_heads  # 14 Query heads\n",
    "num_key_value_heads = config.num_key_value_heads  # 2 Key/Value heads (GQA)\n",
    "hidden_size = config.hidden_size  # 896 features per token\n",
    "max_position_embeddings = config.max_position_embeddings  # max supported sequence length\n",
    "num_hidden_layers = config.num_hidden_layers  # 24 decoder layers\n",
    "rms_norm_eps = config.rms_norm_eps  # epsilon used by RMSNorm to avoid division by zero\n",
    "\n",
    "# Derived values. Integer floor division replaces the former C-style\n",
    "# (int)(a / b) casts: same result for these exact divisions, clearer intent,\n",
    "# and no float round-trip.\n",
    "feature_per_head = hidden_size // num_attention_heads  # 896 // 14 = 64 features per head\n",
    "groups = num_attention_heads // num_key_value_heads  # 14 // 2 = 7 Query heads per K/V head"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "994a9315-7b37-4f15-a070-242d4ed970ea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hand-rolled Qwen2 chat template: wrap a user prompt in the chat format.\n",
    "def my_apply_chat_template(prompt):\n",
    "    \"\"\"\n",
    "    Build the chat-formatted text for a user prompt.\n",
    "\n",
    "    Args:\n",
    "        prompt: raw user input text.\n",
    "\n",
    "    Returns:\n",
    "        str: system prompt + user turn + opened assistant turn, decoded\n",
    "        back to text (special tokens included).\n",
    "    \"\"\"\n",
    "    im_start = 151644  # <|im_start|>\n",
    "    im_end = 151645  # <|im_end|>\n",
    "    newline = 198  # line-break token\n",
    "\n",
    "    # Fixed system message: \"You are a helpful assistant.\"\n",
    "    system_part = [im_start, 8948, newline, 2610, 525, 264, 10950, 17847, 13, im_end, newline]\n",
    "\n",
    "    # User turn wrapping the tokenized prompt.\n",
    "    user_part = [im_start, 872, newline] + tokenizer.encode(prompt) + [im_end, newline]\n",
    "\n",
    "    # Open the assistant turn so generation would continue from here.\n",
    "    assistant_header = [im_start, 77091, newline]\n",
    "\n",
    "    # Decode the assembled token ids back into the templated text.\n",
    "    return tokenizer.decode(system_part + user_part + assistant_header)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "5b14a9e2-67fa-4250-823f-75a5f71cd6b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<|im_start|>system\n",
      "You are a helpful assistant.<|im_end|>\n",
      "<|im_start|>user\n",
      "一个星期有几天？<|im_end|>\n",
      "<|im_start|>assistant\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Demo: apply the hand-rolled chat template to a sample question.\n",
    "prompt = \"一个星期有几天？\"\n",
    "\n",
    "# Build the templated conversation text and show it.\n",
    "text = my_apply_chat_template(prompt)\n",
    "print(text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4473d987-f85b-4c3b-abe8-4d7ecda6f3b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Turn input text into token ids and their word-embedding vectors.\n",
    "def my_word_embedding_process(prompt):\n",
    "    \"\"\"\n",
    "    Convert text to token ids and look up their word embeddings.\n",
    "\n",
    "    Args:\n",
    "        prompt: input text (typically already chat-templated).\n",
    "\n",
    "    Returns:\n",
    "        tuple: (input_ids, word_embeddings) with shapes\n",
    "        [1, seq_length] and [1, seq_length, hidden_size].\n",
    "    \"\"\"\n",
    "    # Tokenize, then add a leading batch dimension for the model.\n",
    "    input_ids = torch.tensor(tokenizer.encode(prompt)).unsqueeze(0)\n",
    "\n",
    "    # Look the ids up in the pretrained embedding table.\n",
    "    # (Equivalent alternative: build a torch.nn.Embedding of shape\n",
    "    # [vocab_size, hidden_size], copy model.model.embed_tokens.weight\n",
    "    # into it, then call it on input_ids.)\n",
    "    word_embeddings = torch.nn.functional.embedding(input_ids, model.model.embed_tokens.weight)\n",
    "\n",
    "    # Return both the ids and their embedding vectors.\n",
    "    return input_ids, word_embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d24e5950-6819-4a0c-994b-ec45e764725f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[151644,   8948,    198,   2610,    525,    264,  10950,  17847,     13,\n",
      "         151645,    198, 151644,    872,    198, 117041,  18830, 101437,     30,\n",
      "         151645,    198, 151644,  77091,    198]])\n",
      "<|im_start|>system\n",
      "You are a helpful assistant.<|im_end|>\n",
      "<|im_start|>user\n",
      "一个星期有几天?<|im_end|>\n",
      "<|im_start|>assistant\n",
      "\n",
      "词嵌入向量：\n",
      "tensor([[[-0.0020,  0.0210, -0.0137,  ...,  0.0057, -0.0184,  0.0120],\n",
      "         [-0.0120, -0.0040,  0.0083,  ...,  0.0083,  0.0131, -0.0119],\n",
      "         [ 0.0035, -0.0315, -0.0108,  ...,  0.0160,  0.0194, -0.0033],\n",
      "         ...,\n",
      "         [-0.0020,  0.0210, -0.0137,  ...,  0.0057, -0.0184,  0.0120],\n",
      "         [ 0.0008,  0.0099, -0.0166,  ...,  0.0017, -0.0144, -0.0074],\n",
      "         [ 0.0035, -0.0315, -0.0108,  ...,  0.0160,  0.0194, -0.0033]]],\n",
      "       dtype=torch.bfloat16, grad_fn=<EmbeddingBackward0>)\n",
      "词嵌入向量的维度为: torch.Size([1, 23, 896])\n"
     ]
    }
   ],
   "source": [
    "# End-to-end check: template -> token ids -> embeddings.\n",
    "user_prompt = \"一个星期有几天?\"\n",
    "prompt = my_apply_chat_template(user_prompt)\n",
    "prompt_ids, embeddings = my_word_embedding_process(prompt)\n",
    "\n",
    "# Show the ids, then round-trip them back to text to verify the encoding.\n",
    "print(prompt_ids)\n",
    "print(tokenizer.decode(prompt_ids[0]))\n",
    "\n",
    "# Show the embedding tensor and its (batch, seq, hidden) shape.\n",
    "print(f\"词嵌入向量：\\n{embeddings}\")\n",
    "print(f\"词嵌入向量的维度为: {embeddings.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3c63d8c4-9a39-4e41-9054-0690446c63a2",
   "metadata": {},
   "source": [
    "# Part2"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d8c5d6f1-003c-4a8d-8c41-41d05d261fa5",
   "metadata": {},
   "source": [
    "## sdpa"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "aed7cb86-7bf8-4568-8664-d2e2cd7e275b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n"
     ]
    }
   ],
   "source": [
    "# Single-head scaled dot-product attention (SDPA) on random data.\n",
    "seq_length = 20\n",
    "\n",
    "# Q, K, V: one batch of seq_length tokens, hidden_size features each.\n",
    "q = torch.randn(1, seq_length, hidden_size)\n",
    "k = torch.randn(1, seq_length, hidden_size)\n",
    "v = torch.randn(1, seq_length, hidden_size)\n",
    "\n",
    "# Attention scores = Q @ K^T / sqrt(d_k); K is transposed so the matmul\n",
    "# yields a [1, seq_length, seq_length] score matrix. d_k is the full\n",
    "# hidden_size here because this demo has no head split.\n",
    "scores = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(hidden_size)\n",
    "\n",
    "# softmax over the last dim turns each row of scores into attention\n",
    "# weights, which then mix the value vectors.\n",
    "weights = torch.nn.functional.softmax(scores, dim=-1)\n",
    "attention_out = torch.matmul(weights, v)\n",
    "\n",
    "# Sanity check on the output shape.\n",
    "print(attention_out.shape)  # expected: [1, 20, 896]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "949c8cc6-68a6-47d3-bb7f-5f7e3e4d2487",
   "metadata": {},
   "source": [
    "## mha"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "03e6c2d9-0030-4fae-93bc-6c0dd8858703",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n"
     ]
    }
   ],
   "source": [
    "# Multi-head attention (MHA) built by hand on random data.\n",
    "seq_length = 20\n",
    "\n",
    "# Q, K, V inputs: [1, seq_length, hidden_size] = [1, 20, 896].\n",
    "query_states = torch.randn(1, seq_length, hidden_size)\n",
    "key_states = torch.randn(1, seq_length, hidden_size)\n",
    "value_states = torch.randn(1, seq_length, hidden_size)\n",
    "\n",
    "# One shared projection for simplicity; real models use distinct\n",
    "# Q/K/V/output projection weights.\n",
    "project_weight = torch.randn(hidden_size, hidden_size)\n",
    "\n",
    "query_states = torch.nn.functional.linear(query_states, project_weight)  # [1, 20, 896]\n",
    "key_states = torch.nn.functional.linear(key_states, project_weight)  # [1, 20, 896]\n",
    "value_states = torch.nn.functional.linear(value_states, project_weight)  # [1, 20, 896]\n",
    "\n",
    "# Split features into heads ([1, 20, 14, 64]) and move the head dim\n",
    "# forward ([1, 14, 20, 64]) so each head computes independently.\n",
    "query_states = query_states.view(1, seq_length, num_attention_heads, feature_per_head).transpose(1, 2)\n",
    "key_states = key_states.view(1, seq_length, num_attention_heads, feature_per_head).transpose(1, 2)\n",
    "value_states = value_states.view(1, seq_length, num_attention_heads, feature_per_head).transpose(1, 2)\n",
    "\n",
    "# Attention scores per head: Q @ K^T -> [1, 14, 20, 20].\n",
    "q_k_mul = torch.matmul(query_states, key_states.transpose(2, 3))\n",
    "\n",
    "# BUGFIX: scale by sqrt(d_k) where d_k is the PER-HEAD dimension (64),\n",
    "# not the full hidden_size (896) — after the head split, each head's\n",
    "# dot products live in a feature_per_head-dimensional space.\n",
    "scaling_factor = math.sqrt(feature_per_head)\n",
    "scaled_q_k_mul = q_k_mul / scaling_factor\n",
    "\n",
    "# Normalize scores into attention weights per head.\n",
    "softmax_out = torch.nn.functional.softmax(scaled_q_k_mul, dim=-1)  # [1, 14, 20, 20]\n",
    "\n",
    "# Weighted sum of values: [1, 14, 20, 64].\n",
    "attention_out = torch.matmul(softmax_out, value_states)\n",
    "\n",
    "# Merge heads back: [1, 20, 14, 64] -> [1, 20, 896], then project out.\n",
    "attention_out = attention_out.transpose(1, 2).reshape(1, seq_length, hidden_size)\n",
    "attention_out = torch.nn.functional.linear(attention_out, project_weight)\n",
    "\n",
    "print(attention_out.shape)  # expected: [1, 20, 896]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45768ddf-ae19-4ffa-ab51-cbb425f0c64e",
   "metadata": {},
   "source": [
    "## gqa"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "73c526bc-c5ff-49b0-8d2a-f1567729e09e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n"
     ]
    }
   ],
   "source": [
    "# Grouped-query attention (GQA) built by hand on random data.\n",
    "seq_length = 20\n",
    "\n",
    "# Q, K, V inputs: [1, seq_length, hidden_size] = [1, 20, 896].\n",
    "query_states = torch.randn(1, seq_length, hidden_size)\n",
    "key_states = torch.randn(1, seq_length, hidden_size)\n",
    "value_states = torch.randn(1, seq_length, hidden_size)\n",
    "\n",
    "# Query keeps the full width; Key/Value are projected down because GQA\n",
    "# uses fewer K/V heads: num_key_value_heads * feature_per_head\n",
    "# = 2 * 64 = 128 features (former magic numbers 896/128 now derived\n",
    "# from the config globals).\n",
    "kv_hidden_size = num_key_value_heads * feature_per_head\n",
    "weight_for_query = torch.randn(hidden_size, hidden_size)\n",
    "weight_for_key_value = torch.randn(kv_hidden_size, hidden_size)\n",
    "\n",
    "query_states = torch.nn.functional.linear(query_states, weight_for_query)  # [1, 20, 896]\n",
    "key_states = torch.nn.functional.linear(key_states, weight_for_key_value)  # [1, 20, 128]\n",
    "value_states = torch.nn.functional.linear(value_states, weight_for_key_value)  # [1, 20, 128]\n",
    "\n",
    "# Split into heads and move the head dim forward.\n",
    "query_states = query_states.view(1, seq_length, num_attention_heads, feature_per_head).transpose(1, 2)  # [1, 14, 20, 64]\n",
    "key_states = key_states.view(1, seq_length, num_key_value_heads, feature_per_head).transpose(1, 2)  # [1, 2, 20, 64]\n",
    "value_states = value_states.view(1, seq_length, num_key_value_heads, feature_per_head).transpose(1, 2)  # [1, 2, 20, 64]\n",
    "\n",
    "# Each group of `groups` (7) Query heads shares one K/V head; replicate\n",
    "# the K/V heads so the shapes line up for the batched matmul.\n",
    "key_states = torch.repeat_interleave(key_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "value_states = torch.repeat_interleave(value_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "\n",
    "# Attention scores per head: [1, 14, 20, 20].\n",
    "q_k_mul = torch.matmul(query_states, key_states.transpose(2, 3))\n",
    "\n",
    "# BUGFIX: scale by sqrt of the per-head dimension (64), not the full\n",
    "# hidden_size (896) — scaled dot-product attention divides by sqrt(d_k).\n",
    "scaling_factor = math.sqrt(feature_per_head)\n",
    "scaled_q_k_mul = q_k_mul / scaling_factor\n",
    "\n",
    "softmax_out = torch.nn.functional.softmax(scaled_q_k_mul, dim=-1)  # [1, 14, 20, 20]\n",
    "attention_out = torch.matmul(softmax_out, value_states)  # [1, 14, 20, 64]\n",
    "\n",
    "# Merge heads ([1, 20, 896]) and apply the output projection.\n",
    "attention_out = attention_out.transpose(1, 2).reshape(1, seq_length, hidden_size)\n",
    "attention_out = torch.nn.functional.linear(attention_out, weight_for_query)\n",
    "\n",
    "print(attention_out.shape)  # expected: [1, 20, 896]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8aae000f-518e-44d8-8d20-01ac2ab4b8dc",
   "metadata": {},
   "source": [
    "# Part3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "8a04f645-1de8-4aa9-a137-490a6b114fe2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 生成 sin/cos 旋转矩阵\n",
    "def generate_rope_matrix(hidden_dim, max_position_embeddings):\n",
    "    \"\"\"\n",
    "    生成用于旋转位置编码的 sin/cos 旋转矩阵。\n",
    "\n",
    "    参数:\n",
    "    hidden_dim: 词嵌入向量的特征维度（即每个嵌入向量的长度）\n",
    "    max_position_embeddings: 允许的最大位置嵌入数量，即模型可以表示的最大序列长度\n",
    "\n",
    "    返回值:\n",
    "    sin_val: sin 旋转矩阵，用于位置编码\n",
    "    cos_val: cos 旋转矩阵，用于位置编码\n",
    "    \"\"\"\n",
    "\n",
    "    # 生成 [0, 2, 4, ..., hidden_dim-2] 的序列，表示嵌入向量的偶数维度\n",
    "    # 这些偶数维度将用于构造 sin/cos 位置编码\n",
    "    seq_list = torch.arange(0, hidden_dim, 2, dtype=torch.int64).float()\n",
    "\n",
    "    # 计算 `2i / hidden_dim`，其中 i 为偶数维度索引\n",
    "    # 该操作决定了不同维度上的位置编码频率（频率与维度有关）\n",
    "    seq_list = seq_list / hidden_dim\n",
    "\n",
    "    # 计算 `10000^(2i/dim)`，这是位置编码公式中的一部分\n",
    "    # 使用 10000 作为基数，是为了让不同维度有不同的频率\n",
    "    seq_list = 10000**seq_list\n",
    "\n",
    "    # 计算 `1/(10000^(2i/dim))`，即旋转角度 θ 序列\n",
    "    # θ 是用于计算 sin/cos 的角度参数，定义了位置编码的频率\n",
    "    theta = 1.0 / seq_list\n",
    "\n",
    "    # 生成位置序列 [0, 1, 2, 3, ..., max_position_embeddings - 1]\n",
    "    # 这个序列代表了每个位置的索引，用于位置编码\n",
    "    t = torch.arange(max_position_embeddings, dtype=torch.int64).type_as(theta)\n",
    "\n",
    "    # 计算位置序列与旋转角度序列的外积，得到一个位置-频率矩阵\n",
    "    # freqs 矩阵的每一行对应一个位置，每一列对应一个频率\n",
    "    freqs = torch.outer(t, theta)\n",
    "\n",
    "    # 将 freqs 矩阵沿最后一维度复制，以便可以同时生成 sin 和 cos 编码\n",
    "    emb = torch.cat((freqs, freqs), dim=-1)\n",
    "\n",
    "    # 计算 sin 和 cos 旋转位置矩阵\n",
    "    # 这些矩阵将与输入的嵌入向量结合，用于位置编码\n",
    "    cos_val = emb.cos().to(torch.bfloat16)\n",
    "    sin_val = emb.sin().to(torch.bfloat16)\n",
    "\n",
    "    # 返回 sin 和 cos 位置矩阵给函数调用者\n",
    "    return sin_val, cos_val\n",
    "\n",
    "\n",
    "# 该函数用于对 query 向量进行旋转变换\n",
    "def rotate_half(x):\n",
    "    \"\"\"\n",
    "    将输入向量的后半部分旋转到前半部分，并将前半部分旋转到后半部分。\n",
    "\n",
    "    参数:\n",
    "    x: torch.Tensor 输入的向量张量，形状为 [..., dim]，dim 是特征维度。\n",
    "\n",
    "    返回值:\n",
    "    torch.Tensor 旋转后的向量张量，形状不变。\n",
    "    \"\"\"\n",
    "    # 将输入向量沿最后一个维度一分为二\n",
    "    x1 = x[..., : x.shape[-1] // 2]  # 取前半部分\n",
    "    x2 = x[..., x.shape[-1] // 2 :]  # 取后半部分\n",
    "\n",
    "    # 将后半部分旋转到前面，将前半部分旋转到后面，并沿最后一个维度拼接\n",
    "    # 具体来说，后半部分的符号取反，实现旋转的效果\n",
    "    return torch.cat((-x2, x1), dim=-1)\n",
    "\n",
    "\n",
    "# Apply rotary position embedding (RoPE) to Query and Key tensors.\n",
    "def apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n",
    "    \"\"\"\n",
    "    Rotate Query/Key by their positions (LLaMA-style RoPE).\n",
    "\n",
    "    Args:\n",
    "        q: Query tensor, [batch_size, num_heads, seq_length, head_dim].\n",
    "        k: Key tensor, same layout as q.\n",
    "        cos: precomputed cos table, indexable by position.\n",
    "        sin: precomputed sin table, indexable by position.\n",
    "        position_ids: token positions, [batch_size, seq_length].\n",
    "\n",
    "    Returns:\n",
    "        (q_embed, k_embed): the rotated Query and Key tensors.\n",
    "    \"\"\"\n",
    "    # Gather the rows for the actual positions; the tables become\n",
    "    # [batch_size, seq_length, head_dim] and broadcast over the heads.\n",
    "    cos_pos = cos[position_ids]\n",
    "    sin_pos = sin[position_ids]\n",
    "\n",
    "    def _rotate(states):\n",
    "        # Inlined rotate_half: (x1, x2) -> (-x2, x1) on the last dim.\n",
    "        half = states.shape[-1] // 2\n",
    "        rotated = torch.cat((-states[..., half:], states[..., :half]), dim=-1)\n",
    "        # LLaMA RoPE formula: x * cos + rotate_half(x) * sin.\n",
    "        return states * cos_pos + rotated * sin_pos\n",
    "\n",
    "    return _rotate(q), _rotate(k)\n",
    "\n",
    "\n",
    "# Precompute the global RoPE sin/cos tables once; later cells reuse them.\n",
    "global_sin_matrix, global_cos_matrix = generate_rope_matrix(feature_per_head, max_position_embeddings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "8d87e7ae-6212-46ef-b9dd-c0a052fb9b65",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "query after RoPE: torch.Size([1, 20, 64])\n",
      "key after RoPE: torch.Size([1, 20, 64])\n"
     ]
    }
   ],
   "source": [
    "# Demo: apply RoPE to a single-head query/key pair.\n",
    "seq_length = 20\n",
    "\n",
    "# query/key: [1, seq_length, feature_per_head]\n",
    "query = torch.randn(1, seq_length, feature_per_head)\n",
    "key = torch.randn(1, seq_length, feature_per_head)\n",
    "\n",
    "# Token positions used to index the precomputed sin/cos tables.\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# BUGFIX: apply_rotary_pos_emb expects (q, k, cos, sin, position_ids);\n",
    "# the cos/sin matrices were previously passed in swapped order, which\n",
    "# computed q*sin + rotate_half(q)*cos instead of the RoPE formula.\n",
    "new_query, new_key = apply_rotary_pos_emb(\n",
    "    query, key, global_cos_matrix, global_sin_matrix, position_id\n",
    ")\n",
    "\n",
    "print(f\"query after RoPE: {new_query.shape}\")\n",
    "print(f\"key after RoPE: {new_key.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9126c6ec-700d-4b01-8c0d-99580b03d54e",
   "metadata": {},
   "source": [
    "# Part4"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b49ab66f-d3a9-4237-a73d-7d22d368b018",
   "metadata": {},
   "source": [
    "## KVCache"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "86f6ddda-9149-4567-98ee-3801899b2691",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义一个 KVCache 类，用于缓存注意力层的 Key 和 Value 状态\n",
    "class KVCache:\n",
    "    def __init__(self) -> None:\n",
    "        \"\"\"\n",
    "        KVCache 类的构造函数。\n",
    "\n",
    "        该类用于存储和管理多层注意力模型中的 Key 和 Value 缓存。\n",
    "        每一层的 Key 和 Value 都存储在一个可变长度的列表中，方便在生成过程中逐步追加新计算的状态。\n",
    "        \"\"\"\n",
    "        # 类中的 KCache 和 VCache 分别用于存储 Key 和 Value 的缓存\n",
    "        # 这两个属性都是可变长度的列表，每一层注意力对应一个缓存列表\n",
    "        self.KCache: List[torch.tensor] = []\n",
    "        self.VCache: List[torch.tensor] = []\n",
    "\n",
    "    def update(self, new_key_states, new_value_states, layer_idx):\n",
    "        \"\"\"\n",
    "        更新 KVCache，将新的 Key 和 Value 状态添加到指定层的缓存中。\n",
    "\n",
    "        参数:\n",
    "        new_key_states: 新计算的 Key 状态，形状为 [batch_size, seq_length, hidden_dim]\n",
    "        new_value_states: 新计算的 Value 状态，形状同上\n",
    "        layer_idx: 表示第几层注意力层（索引从 0 开始）\n",
    "\n",
    "        返回值:\n",
    "        更新后的这一层的 Key 和 Value 缓存。\n",
    "        \"\"\"\n",
    "        # 如果缓存的层数少于 layer_idx，说明当前层的 Key 和 Value 是新的层，需要初始化缓存\n",
    "        if len(self.KCache) <= layer_idx:\n",
    "            # 在对应层的位置初始化缓存，存储当前的 Key 和 Value 状态\n",
    "            self.KCache.append(new_key_states)\n",
    "            self.VCache.append(new_value_states)\n",
    "        else:\n",
    "            # 如果当前层已经存在缓存，则将新生成的 Key 和 Value 状态追加到现有缓存的后面\n",
    "            # 追加操作是在 token 序列长度的维度（dim=-2）上进行的\n",
    "            self.KCache[layer_idx] = torch.cat([self.KCache[layer_idx], new_key_states], dim=-2)\n",
    "            self.VCache[layer_idx] = torch.cat([self.VCache[layer_idx], new_value_states], dim=-2)\n",
    "\n",
    "        # 返回更新后的这一层的 Key 和 Value 缓存\n",
    "        return self.KCache[layer_idx], self.VCache[layer_idx]\n",
    "\n",
    "    # 获取某一层中已经缓存的 token 的数量（即序列长度 seq_length）\n",
    "    def get_seq_length(self, layer_idx) -> int:\n",
    "        \"\"\"\n",
    "        获取指定层中已经缓存的 token 数量（即序列长度）。\n",
    "\n",
    "        参数:\n",
    "        layer_idx: 要查询的注意力层索引\n",
    "\n",
    "        返回值:\n",
    "        已缓存的 token 数量，如果该层还没有缓存数据，则返回 0。\n",
    "        \"\"\"\n",
    "        # 如果请求的层还没有缓存，则返回 0\n",
    "        if len(self.KCache) <= layer_idx:\n",
    "            return 0\n",
    "        # 返回该层缓存的 token 数量，即缓存的 Key 状态的序列长度\n",
    "        return self.KCache[layer_idx].shape[-2]\n",
    "\n",
    "    # 打印指定层缓存的 token 数量\n",
    "    def print(self, layer_idx):\n",
    "        \"\"\"\n",
    "        打印指定层中已经缓存的 token 数量。\n",
    "\n",
    "        参数:\n",
    "        layer_idx: 要查询的注意力层索引\n",
    "        \"\"\"\n",
    "        # 如果缓存为空，输出提示信息\n",
    "        if len(self.KCache) == 0:\n",
    "            print(\"缓存为空\")\n",
    "        else:\n",
    "            # 打印指定层缓存的 token 数量\n",
    "            print(f\"层 {layer_idx} 缓存的 token 数：\", self.KCache[layer_idx].shape[-2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "4e8161ef-5fd0-4d2d-b6a3-e85ebe947550",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "缓存为空\n",
      "层 0 缓存的 token 数： 5\n",
      "层 1 缓存的 token 数： 1\n",
      "层 0 缓存的 token 数： 7\n"
     ]
    }
   ],
   "source": [
    "# Exercise KVCache: prefill 5 tokens on layer 0, 1 token on layer 1,\n",
    "# then append 2 more tokens on layer 0, printing counts along the way.\n",
    "kv_cache = KVCache()\n",
    "kv_cache.print(0)  # nothing cached yet\n",
    "\n",
    "# Layer 0: cache an initial chunk of 5 tokens (feature dim 896).\n",
    "kv_cache.update(torch.randn(1, 5, 896), torch.randn(1, 5, 896), 0)\n",
    "kv_cache.print(0)  # expect 5 tokens\n",
    "\n",
    "# Layer 1: cache a single token.\n",
    "kv_cache.update(torch.randn(1, 1, 896), torch.randn(1, 1, 896), 1)\n",
    "kv_cache.print(1)  # expect 1 token\n",
    "\n",
    "# Layer 0 again: append 2 more tokens.\n",
    "kv_cache.update(torch.randn(1, 2, 896), torch.randn(1, 2, 896), 0)\n",
    "kv_cache.print(0)  # expect 7 tokens\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8d3d9281-f243-4f16-85eb-8a5f87ffec17",
   "metadata": {},
   "source": [
    "## 插入位置编码的 GQA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "538ee1db-dbfa-4f32-87ca-ea330a8a914a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n"
     ]
    }
   ],
   "source": [
    "# GQA with rotary position embedding, using the fused SDPA kernel.\n",
    "seq_length = 20\n",
    "\n",
    "# Random Q/K/V inputs: [1, seq_length, hidden_size] = [1, 20, 896].\n",
    "query_states = torch.randn(1, seq_length, hidden_size)\n",
    "key_states = torch.randn(1, seq_length, hidden_size)\n",
    "value_states = torch.randn(1, seq_length, hidden_size)\n",
    "\n",
    "# Query keeps the full width; Key/Value shrink to\n",
    "# num_key_value_heads * feature_per_head = 2 * 64 = 128 features\n",
    "# (former magic numbers 896/128 now derived from the config globals).\n",
    "kv_hidden_size = num_key_value_heads * feature_per_head\n",
    "weight_for_query = torch.randn(hidden_size, hidden_size)\n",
    "weight_for_key_value = torch.randn(kv_hidden_size, hidden_size)\n",
    "\n",
    "query_states = torch.nn.functional.linear(query_states, weight_for_query)  # [1, 20, 896]\n",
    "key_states = torch.nn.functional.linear(key_states, weight_for_key_value)  # [1, 20, 128]\n",
    "value_states = torch.nn.functional.linear(value_states, weight_for_key_value)  # [1, 20, 128]\n",
    "\n",
    "# Split into heads and move the head dim forward.\n",
    "query_states = query_states.view(1, seq_length, num_attention_heads, feature_per_head).transpose(1, 2)  # [1, 14, 20, 64]\n",
    "key_states = key_states.view(1, seq_length, num_key_value_heads, feature_per_head).transpose(1, 2)  # [1, 2, 20, 64]\n",
    "value_states = value_states.view(1, seq_length, num_key_value_heads, feature_per_head).transpose(1, 2)  # [1, 2, 20, 64]\n",
    "\n",
    "# -------------------- rotary position embedding ----------------------\n",
    "# Actual token positions 0..seq_length-1, plus the matching rows of the\n",
    "# precomputed global sin/cos tables (built for max_position_embeddings).\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "cos = global_cos_matrix[:seq_length]\n",
    "sin = global_sin_matrix[:seq_length]\n",
    "\n",
    "# LLaMA-style RoPE applied to Query and Key.\n",
    "query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_id)\n",
    "# ------------------ rotary position embedding done -------------------\n",
    "\n",
    "# Replicate the K/V heads so each group of `groups` Query heads finds\n",
    "# its shared Key/Value head.\n",
    "key_states = torch.repeat_interleave(key_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "value_states = torch.repeat_interleave(value_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "\n",
    "# Fused kernel: scales by 1/sqrt(head_dim) internally.\n",
    "# NOTE(review): no causal mask is applied here (is_causal defaults to\n",
    "# False), matching the earlier manual demos — confirm this is intended.\n",
    "attention_out = torch.nn.functional.scaled_dot_product_attention(\n",
    "    query_states, key_states, value_states\n",
    ")\n",
    "\n",
    "# Merge heads ([1, 20, 896]) and apply the output projection.\n",
    "attention_out = attention_out.transpose(1, 2).reshape(1, seq_length, hidden_size)\n",
    "attention_out = torch.nn.functional.linear(attention_out, weight_for_query)\n",
    "\n",
    "print(attention_out.shape)  # expected: [1, 20, 896]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f8a92a76-f71e-4bcd-b86d-c808ae2d1cef",
   "metadata": {},
   "source": [
    "## 插入位置编码和 KVCache 的 GQA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "0fa8fc31-ff5d-4ff0-b333-2c49524f90e0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n"
     ]
    }
   ],
   "source": [
    "# 处理的 token 个数，表示当前句子中包含的 token 数量\n",
    "seq_length = 20\n",
    "\n",
    "# 实例化 KVCache，用于存储和管理注意力层的 Key 和 Value 缓存\n",
    "kv_cache = KVCache()\n",
    "\n",
    "# 创建 Query, Key, Value 矩阵，形状为 [1, 20, 896]\n",
    "# 表示一个包含 20 个 token 的句子，且每个 token 的特征维度为 896\n",
    "query_states = torch.randn(1, seq_length, hidden_size)\n",
    "key_states = torch.randn(1, seq_length, hidden_size)\n",
    "value_states = torch.randn(1, seq_length, hidden_size)\n",
    "\n",
    "# 对 Query 进行线性映射，映射后的特征维度仍然为 896\n",
    "weight_for_query = torch.randn(896, hidden_size)\n",
    "# 对 Key 和 Value 进行线性映射，映射后的特征维度为 128\n",
    "# 这是因为在 GQA 机制中，Key 和 Value 的头的数量减少，因此总特征维度也减少\n",
    "weight_for_key_value = torch.randn(128, hidden_size)\n",
    "\n",
    "# 分别对 Query、Key 和 Value 进行线性映射，得到映射后的矩阵\n",
    "# 注意三者的输出矩阵维度不再相同\n",
    "query_states = torch.nn.functional.linear(query_states, weight_for_query)  # [1, 20, 896]\n",
    "key_states = torch.nn.functional.linear(key_states, weight_for_key_value)  # [1, 20, 128]\n",
    "value_states = torch.nn.functional.linear(value_states, weight_for_key_value)  # [1, 20, 128]\n",
    "\n",
    "# 对 Query 的特征维度进行拆分，将 896 个特征拆分为 [14, 64] 的特征\n",
    "query_states = query_states.view(\n",
    "    1, seq_length, num_attention_heads, feature_per_head\n",
    ")  # [1, 20, 14, 64]\n",
    "# 对 Key 和 Value 的特征维度进行拆分，将 128 个特征拆分为 [2, 64] 的特征\n",
    "key_states = key_states.view(1, seq_length, num_key_value_heads, feature_per_head)  # [1, 20, 2, 64]\n",
    "value_states = value_states.view(\n",
    "    1, seq_length, num_key_value_heads, feature_per_head\n",
    ")  # [1, 20, 2, 64]\n",
    "\n",
    "# 将“头”这一维度（14 或 2）放在高维，使维度信息描述为：\n",
    "# 1 个句子有 14（或 2）个头，每个头有 20 个 token 序列，每个 token 有 64 个特征\n",
    "# Query 和 Key/Value 的区别在于头的个数不同\n",
    "query_states = query_states.transpose(1, 2)  # [1, 14, 20, 64]\n",
    "key_states = key_states.transpose(1, 2)  # [1, 2, 20, 64]\n",
    "value_states = value_states.transpose(1, 2)  # [1, 2, 20, 64]\n",
    "\n",
    "# --------------------  嵌入位置编码  ----------------------------\n",
    "# 生成句子中每个 token 的位置 ID，例如有 20 个 token，生成的位置为 [0, 1, 2, ..., 19]\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# 从全局旋转矩阵中抽取出对应位置的旋转矩阵\n",
    "# 由于全局旋转矩阵包含了所有位置的信息，这里只抽取前 20 个位置的 sin/cos 矩阵\n",
    "cos = global_cos_matrix[:seq_length]\n",
    "sin = global_sin_matrix[:seq_length]\n",
    "\n",
    "# 使用 LLaMa 版本的旋转位置编码公式，应用位置编码到 Query 和 Key 上\n",
    "query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_id)\n",
    "# ------------------- 嵌入位置编码完成 --------------------------\n",
    "\n",
    "# -------------------- 嵌入 KVCache 优化 -------------------------\n",
    "# 这里模拟第 0 层的缓存操作\n",
    "# `kv_cache.update` 返回的是更新后的 Key 和 Value 状态\n",
    "key_states, value_states = kv_cache.update(key_states, value_states, layer_idx=0)\n",
    "# -------------------- 嵌入 KVCache 完成 -------------------------\n",
    "\n",
    "# 在计算 Query 和 Key 的点积之前，由于 Key 的头的数量为 2，而 Query 的头的数量为 14，\n",
    "# 直接计算会导致维度不匹配。为了体现分组 Query 共享一组 Key/Value 的逻辑（即 GQA 的目的），\n",
    "# 需要将 Key 的头的数量复制 7 份，使 Query 和 Key 的头的数量一致，\n",
    "# 这体现了 Query 共享相同 Key 的含义。\n",
    "# 使用 torch.repeat_interleave 在第 1 维（头的维度）上复制 7 份，以匹配 Query 的头数量\n",
    "key_states = torch.repeat_interleave(key_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "value_states = torch.repeat_interleave(value_states, repeats=groups, dim=1)  # [1, 14, 20, 64]\n",
    "\n",
    "# 使用 PyTorch 的 scaled_dot_product_attention 函数计算注意力输出\n",
    "attention_out = torch.nn.functional.scaled_dot_product_attention(\n",
    "    query_states, key_states, value_states\n",
    ")\n",
    "\n",
    "# 将“头”这一维度再转置回原来的位置，得到 [1, 20, 14, 64]\n",
    "attention_out = attention_out.transpose(1, 2)  # [1, 20, 14, 64]\n",
    "\n",
    "# 将多个头的特征拼接回原来的 896 个特征\n",
    "attention_out = attention_out.reshape(1, seq_length, hidden_size)  # [1, 20, 896]\n",
    "\n",
    "# 使用线性映射层将拼接后的特征再映射回去\n",
    "attention_out = torch.nn.functional.linear(attention_out, weight_for_query)  # [1, 20, 896]\n",
    "\n",
    "# 打印最终输出的形状，验证结果是否正确\n",
    "print(attention_out.shape)  # 输出应为 [1, 20, 896]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eb718c41-83ac-416c-a53f-731a149c7355",
   "metadata": {},
   "source": [
    "## 封装自定义 GQA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "7b8aa1ba-5318-4c58-914a-910a308105b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def my_own_gqa(layer_idx, hidden_states, kv_cache, position_id):\n",
    "    \"\"\"\n",
    "    实现自定义的分组查询注意力机制 (GQA)，并且应用位置编码和 KV 缓存优化。\n",
    "\n",
    "    参数:\n",
    "    - layer_idx: int，表示当前处理的是第几层的注意力层\n",
    "    - hidden_states: torch.Tensor，输入的隐藏状态，形状为 [1, seq_length, hidden_size]\n",
    "    - kv_cache: KVCache 对象，用于缓存 Key 和 Value\n",
    "    - position_id: torch.Tensor，表示每个 token 的位置 ID，形状为 [1, seq_length]\n",
    "\n",
    "    返回值:\n",
    "    - attn_out: torch.Tensor，经过 GQA 和位置编码处理后的注意力输出\n",
    "    - kv_cache: KVCache 对象，更新后的 KV 缓存\n",
    "    \"\"\"\n",
    "    # 获取输入的序列长度, -2 代表倒数第二个维度\n",
    "    seq_length = hidden_states.size()[-2]\n",
    "\n",
    "    # 对 Query 进行线性映射，得到映射后的 Query 矩阵，形状为 [1, seq_length, 896]\n",
    "    query_states = torch.nn.functional.linear(\n",
    "        hidden_states,\n",
    "        model.model.layers[layer_idx].self_attn.q_proj.weight,\n",
    "        model.model.layers[layer_idx].self_attn.q_proj.bias,\n",
    "    )\n",
    "\n",
    "    # 对 Key 进行线性映射，得到映射后的 Key 矩阵，形状为 [1, seq_length, 128]\n",
    "    key_states = torch.nn.functional.linear(\n",
    "        hidden_states,\n",
    "        model.model.layers[layer_idx].self_attn.k_proj.weight,\n",
    "        model.model.layers[layer_idx].self_attn.k_proj.bias,\n",
    "    )\n",
    "\n",
    "    # 对 Value 进行线性映射，得到映射后的 Value 矩阵，形状为 [1, seq_length, 128]\n",
    "    value_states = torch.nn.functional.linear(\n",
    "        hidden_states,\n",
    "        model.model.layers[layer_idx].self_attn.v_proj.weight,\n",
    "        model.model.layers[layer_idx].self_attn.v_proj.bias,\n",
    "    )\n",
    "\n",
    "    # 对特征维度进行拆分，将 Query 的 896 个特征拆分为 [14, 64] 的特征\n",
    "    query_states = query_states.view(\n",
    "        1, seq_length, num_attention_heads, feature_per_head\n",
    "    )  # [1, seq_length, 14, 64]\n",
    "\n",
    "    # 将 Key 和 Value 的 128 个特征拆分为 [2, 64] 的特征\n",
    "    key_states = key_states.view(\n",
    "        1, seq_length, num_key_value_heads, feature_per_head\n",
    "    )  # [1, seq_length, 2, 64]\n",
    "    value_states = value_states.view(\n",
    "        1, seq_length, num_key_value_heads, feature_per_head\n",
    "    )  # [1, seq_length, 2, 64]\n",
    "\n",
    "    # 交换维度，将 \"头\" 维度 (14 或 2) 放在第二维，方便后续计算\n",
    "    query_states = query_states.transpose(1, 2)  # [1, 14, seq_length, 64]\n",
    "    key_states = key_states.transpose(1, 2)  # [1, 2, seq_length, 64]\n",
    "    value_states = value_states.transpose(1, 2)  # [1, 2, seq_length, 64]\n",
    "\n",
    "    # 计算 KV 缓存后的总序列长度，包括当前序列长度和缓存序列长度\n",
    "    kv_seq_len = seq_length + kv_cache.get_seq_length(layer_idx)\n",
    "\n",
    "    # --------------------  应用旋转位置编码  ----------------------------\n",
    "    # 从全局旋转矩阵中提取当前序列长度所需的 cos 和 sin 矩阵\n",
    "    cos = global_cos_matrix[:kv_seq_len]\n",
    "    sin = global_sin_matrix[:kv_seq_len]\n",
    "\n",
    "    # 使用旋转位置编码将位置信息应用到 Query 和 Key 上\n",
    "    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_id)\n",
    "    # ------------------- 旋转位置编码完成  ------------------------------\n",
    "\n",
    "    # --------------------  更新 KV 缓存  -------------------------\n",
    "    # 将当前计算的 Key 和 Value 状态更新到 KV 缓存中，返回更新后的 Key 和 Value\n",
    "    key_states, value_states = kv_cache.update(key_states, value_states, layer_idx)\n",
    "    # --------------------  KV 缓存更新完成  -------------------------\n",
    "\n",
    "    # 在计算 Query 和 Key 的点积之前，需要处理头的数量不匹配的问题：\n",
    "    # Key 的头数量为 2，而 Query 的头数量为 14，直接计算会导致维度不匹配。\n",
    "    # 因此，需要将 Key 的头数量复制 7 份，使得 Query 和 Key 的头数量一致，体现分组共享的逻辑（GQA）。\n",
    "    key_states = torch.repeat_interleave(\n",
    "        key_states, repeats=groups, dim=1\n",
    "    )  # [1, 14, seq_length, 64]\n",
    "    value_states = torch.repeat_interleave(\n",
    "        value_states, repeats=groups, dim=1\n",
    "    )  # [1, 14, seq_length, 64]\n",
    "\n",
    "    # 计算多头注意力，使用 PyTorch 的 scaled_dot_product_attention 函数\n",
    "    # is_causal 表示是否为自回归模型，这里根据序列长度来确定\n",
    "    is_causal = seq_length > 1\n",
    "    attention_out = torch.nn.functional.scaled_dot_product_attention(\n",
    "        query_states, key_states, value_states, is_causal=is_causal\n",
    "    )\n",
    "\n",
    "    # 将“头”维度再交换回来，恢复原来的形状 [1, seq_length, 14, 64]\n",
    "    attention_out = attention_out.transpose(1, 2)  # [1, seq_length, 14, 64]\n",
    "\n",
    "    # 将多个头的输出拼接回原来的特征维度，恢复为 [1, seq_length, 896]\n",
    "    attention_out = attention_out.reshape(1, seq_length, hidden_size)\n",
    "\n",
    "    # 通过线性映射层将拼接后的输出映射回去，最终得到 [1, seq_length, 896] 的输出\n",
    "    attn_out = torch.nn.functional.linear(\n",
    "        attention_out, model.model.layers[layer_idx].self_attn.o_proj.weight\n",
    "    )\n",
    "\n",
    "    return attn_out, kv_cache"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8aa13d70-2e60-4729-83cc-acd3acc011bc",
   "metadata": {},
   "source": [
    "## 测试以下封装的自定义 GQA 的功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "48fdd1e1-3205-41e9-80ee-5468015ee4b4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 20, 896])\n",
      "层 0 缓存的 token 数： 20\n"
     ]
    }
   ],
   "source": [
    "# 定义输入的隐藏状态，形状为 [1, 20, 896]，表示包含 20 个 token 的序列\n",
    "seq_length = 20\n",
    "hidden_states = torch.randn(1, seq_length, hidden_size).to(torch.bfloat16)\n",
    "\n",
    "# 实例化 KVCache，用于存储和管理注意力层的 Key 和 Value 缓存\n",
    "kv_cache = KVCache()\n",
    "\n",
    "# 生成位置 ID，表示每个 token 在序列中的位置\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# 测试第一层的自定义 GQA 机制\n",
    "layer_idx = 0\n",
    "attn_out, past_kv_cache = my_own_gqa(layer_idx, hidden_states, kv_cache, position_id)\n",
    "\n",
    "# 输出注意力结果的形状，验证计算是否正确\n",
    "print(attn_out.shape)\n",
    "\n",
    "# 打印 KV 缓存的信息，验证缓存是否更新\n",
    "past_kv_cache.print(0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "325ee72c-7259-43cb-a676-1f038fc3d699",
   "metadata": {},
   "source": [
    "# Part5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "9c45f489-357f-48b2-a4fa-0856da8cbea9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def my_mlp(layer_idx, hidden_state: torch.tensor):\n",
    "    \"\"\"\n",
    "    实现前馈神经网络 MLP 的计算，实际就是文章中的 FFN 层\n",
    "\n",
    "    参数:\n",
    "    - layer_idx: int，表示当前处理的是第几层\n",
    "    - hidden_state: torch.Tensor，输入的隐藏状态，形状为 [batch_size, seq_length, hidden_size]\n",
    "\n",
    "    返回值:\n",
    "    - down_proj: torch.Tensor，经过 MLP 处理后的输出\n",
    "    \"\"\"\n",
    "    # 线性变换，应用 gate_proj 权重矩阵\n",
    "    gate_proj = torch.nn.functional.linear(\n",
    "        hidden_state, model.model.layers[layer_idx].mlp.gate_proj.weight\n",
    "    )\n",
    "    # 线性变换，应用 up_proj 权重矩阵\n",
    "    up_proj = torch.nn.functional.linear(\n",
    "        hidden_state, model.model.layers[layer_idx].mlp.up_proj.weight\n",
    "    )\n",
    "    # 使用 SiLU 激活函数，并进行逐元素相乘，再通过 down_proj 权重矩阵进行线性变换\n",
    "    down_proj = torch.nn.functional.linear(\n",
    "        torch.functional.F.silu(gate_proj) * up_proj,\n",
    "        model.model.layers[layer_idx].mlp.down_proj.weight,\n",
    "    )\n",
    "    return down_proj\n",
    "\n",
    "\n",
    "# 实现 RMSNorm\n",
    "def my_rms_norm(states, weights, eps):\n",
    "    \"\"\"\n",
    "    实现 RMSNorm 层的计算，用于标准化输入张量。\n",
    "\n",
    "    参数:\n",
    "    - states: torch.Tensor，输入张量\n",
    "    - weights: torch.Tensor，RMSNorm 层的权重\n",
    "    - eps: float，防止除零的 epsilon 值\n",
    "\n",
    "    返回值:\n",
    "    - torch.Tensor，经过 RMSNorm 标准化后的张量\n",
    "    \"\"\"\n",
    "    # 将输入转换为 float32 类型\n",
    "    states = states.to(torch.float32)\n",
    "    # 计算方差，沿最后一个维度进行平均\n",
    "    variance = states.pow(2).mean(-1, keepdim=True)\n",
    "    # 计算标准化后的张量\n",
    "    states = states * torch.rsqrt(variance + eps)\n",
    "    # 将标准化后的张量乘以权重，并转换回权重的类型\n",
    "    return weights * states.to(weights.dtype)\n",
    "\n",
    "\n",
    "# 实现解码器层的前向计算\n",
    "def my_decoder(states, layer_idx, position_id, past_key_value):\n",
    "    \"\"\"\n",
    "    实现解码器层的前向计算，包括注意力机制和 MLP 层。\n",
    "\n",
    "    参数:\n",
    "    - states: torch.Tensor，输入的隐藏状态\n",
    "    - layer_idx: int，表示当前处理的是第几层\n",
    "    - position_id: torch.Tensor，表示每个 token 的位置 ID\n",
    "    - past_key_value: KVCache 对象，用于缓存 Key 和 Value\n",
    "\n",
    "    返回值:\n",
    "    - out: torch.Tensor，经过解码器层处理后的输出\n",
    "    - present_key_value: KVCache 对象，更新后的 KV 缓存\n",
    "    \"\"\"\n",
    "    residual = states\n",
    "\n",
    "    # 输入层的 Norm 运算\n",
    "    out = my_rms_norm(states, model.model.layers[layer_idx].input_layernorm.weight, rms_norm_eps)\n",
    "    # 计算自定义的 GQA 注意力层输出，并更新 KV 缓存\n",
    "    out, present_key_value = my_own_gqa(layer_idx, out, past_key_value, position_id)\n",
    "    # 残差连接\n",
    "    out = out + residual\n",
    "\n",
    "    residual = out\n",
    "    # 注意力后层的 Norm 运算\n",
    "    out = my_rms_norm(\n",
    "        out, model.model.layers[layer_idx].post_attention_layernorm.weight, rms_norm_eps\n",
    "    )\n",
    "    # 计算 mlp 输出\n",
    "    out = my_mlp(layer_idx, out)\n",
    "    # 残差连接\n",
    "    out = out + residual\n",
    "\n",
    "    return out, present_key_value\n",
    "\n",
    "\n",
    "# 实现整个模型的前向计算\n",
    "def my_module(states, past_key_value, position_id):\n",
    "    \"\"\"\n",
    "    实现整个模型的前向计算，包括所有的解码器层。\n",
    "\n",
    "    参数:\n",
    "    - states: torch.Tensor，输入的隐藏状态\n",
    "    - past_key_value: KVCache 对象，用于缓存 Key 和 Value\n",
    "    - position_id: torch.Tensor，表示每个 token 的位置 ID\n",
    "\n",
    "    返回值:\n",
    "    - states: torch.Tensor，经过所有解码器层处理后的输出\n",
    "    - past_key_value: KVCache 对象，更新后的 KV 缓存\n",
    "    \"\"\"\n",
    "    # 遍历所有的解码器层，依次进行处理\n",
    "    for layer_idx in range(num_hidden_layers):\n",
    "        states, present_key_value = my_decoder(states, layer_idx, position_id, past_key_value)\n",
    "\n",
    "    # 更新 KV 缓存\n",
    "    past_key_value = present_key_value\n",
    "    # 最后进行 RMSNorm 归一化\n",
    "    states = my_rms_norm(states, model.model.norm.weight, rms_norm_eps)\n",
    "    return states, past_key_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "107144c4-fbb2-4293-bdb7-4fb3acfd7830",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 15, 896])\n",
      "层 0 缓存的 token 数： 15\n"
     ]
    }
   ],
   "source": [
    "# 定义一个包含 15 个 token 的输入状态，形状为 [1, 15, 896]\n",
    "seq_length = 15\n",
    "hidden_states = torch.randn(1, seq_length, hidden_size).to(torch.bfloat16)\n",
    "\n",
    "# 实例化 KVCache，用于存储和管理注意力层的 Key 和 Value 缓存\n",
    "kv_cache = KVCache()\n",
    "\n",
    "# 生成位置 ID，表示每个 token 的位置\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# 调用 my_module 进行前向计算\n",
    "states, past_kv_cache = my_module(hidden_states, kv_cache, position_id)\n",
    "\n",
    "# 输出结果的形状，验证计算是否正确\n",
    "print(states.shape)\n",
    "# 打印 KV 缓存的内容，验证缓存是否更新\n",
    "past_kv_cache.print(0)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4665e149-230b-4fff-99d3-18450af48678",
   "metadata": {},
   "source": [
    "# Part6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "a7156fae-d46e-41b7-9b25-4828350aa7a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义生成模型的超参数\n",
    "top_p = 0.8  # Top-p 采样的累积概率阈值\n",
    "top_k = 20  # Top-k 采样的 k 值，表示只保留概率最高的 k 个 token\n",
    "temperature = 0.7  # 温度系数，用于调整 logits 的分布，使生成更有随机性\n",
    "repetition_penalty = 1.1  # 重复惩罚系数，用于抑制生成重复的 token\n",
    "\n",
    "# 实现语言模型的输出层（lm_head），将最后一个 token 的隐藏状态映射为 logits\n",
    "def my_lm_head(states):\n",
    "    \"\"\"\n",
    "    将最后一个 token 的隐藏状态映射为 logits。\n",
    "\n",
    "    参数:\n",
    "    - states: torch.Tensor，模型的隐藏状态，形状为 [batch_size, seq_length, hidden_size]\n",
    "\n",
    "    返回值:\n",
    "    - logits: torch.Tensor，映射后的 logits，形状为 [1, vocab_size]\n",
    "    \"\"\"\n",
    "    logits = torch.nn.functional.linear(states[-1, -1, :], model.lm_head.weight)\n",
    "    logits = logits[None, :]  # 保持 batch 维度\n",
    "    return logits\n",
    "\n",
    "\n",
    "# 实现 Top-k 策略的 logits 处理\n",
    "def topk_logits_warper(input_ids, logits):\n",
    "    \"\"\"\n",
    "    实现 Top-k 采样策略，保留概率最高的 k 个 token，其余 token 的 logits 设置为负无穷。\n",
    "\n",
    "    参数:\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "    - logits: torch.Tensor，模型的输出 logits，形状为 [1, vocab_size]\n",
    "\n",
    "    返回值:\n",
    "    - logits_processed: 经过处理后的 logits，只保留概率最高的 k 个 token\n",
    "    \"\"\"\n",
    "    filter_value = -float(\"Inf\")\n",
    "    top_k_temp = min(top_k, logits.size(-1))  # 确保 top_k 值不会超过 logits 的大小\n",
    "\n",
    "    # 找出所有 logits 中小于 top-k 第 k 大值的元素，并将其替换为负无穷\n",
    "    indices_to_remove = logits < torch.topk(logits, top_k_temp)[0][..., -1, None]\n",
    "    logits_processed = logits.masked_fill(indices_to_remove, filter_value)\n",
    "    return logits_processed\n",
    "\n",
    "\n",
    "# 实现 Top-p 策略的 logits 处理\n",
    "def topp_logits_warper(input_ids, logits):\n",
    "    \"\"\"\n",
    "    实现 Top-p 采样策略，保留累积概率小于 top_p 的 token，其余 token 的 logits 设置为负无穷。\n",
    "\n",
    "    参数:\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "    - logits: torch.Tensor，模型的输出 logits，形状为 [1, vocab_size]\n",
    "\n",
    "    返回值:\n",
    "    - logits_processed: 经过处理后的 logits，只保留累积概率小于 top_p 的 token\n",
    "    \"\"\"\n",
    "    min_tokens_to_keep = 1\n",
    "    filter_value = -float(\"Inf\")\n",
    "\n",
    "    # 对 logits 进行排序并计算累积概率\n",
    "    sorted_logits, sorted_indices = torch.sort(logits, descending=False)\n",
    "    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)\n",
    "\n",
    "    # 找出累积概率大于 (1 - top_p) 的 token，并将其替换为负无穷\n",
    "    sorted_indices_to_remove = cumulative_probs <= (1 - top_p)\n",
    "    sorted_indices_to_remove[..., -min_tokens_to_keep:] = 0  # 保证至少保留一个 token\n",
    "\n",
    "    # 将排序后的结果映射回原始的 logits\n",
    "    indices_to_remove = sorted_indices_to_remove.scatter(\n",
    "        1, sorted_indices, sorted_indices_to_remove\n",
    "    )\n",
    "    logits_processed = logits.masked_fill(indices_to_remove, filter_value)\n",
    "    return logits_processed\n",
    "\n",
    "\n",
    "# 实现温度系数的 logits 处理\n",
    "def temperature_logits_warper(input_ids, logits):\n",
    "    \"\"\"\n",
    "    实现温度系数处理，将 logits 除以温度系数来控制生成的随机性。\n",
    "\n",
    "    参数:\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "    - logits: torch.Tensor，模型的输出 logits\n",
    "\n",
    "    返回值:\n",
    "    - logits_processed: 经过温度系数处理后的 logits\n",
    "    \"\"\"\n",
    "    logits_processed = logits / temperature  # 将 logits 除以温度系数\n",
    "    return logits_processed\n",
    "\n",
    "\n",
    "# 实现重复惩罚策略的 logits 处理\n",
    "def repetition_penalty_logits_processor(input_ids, logits):\n",
    "    \"\"\"\n",
    "    实现重复惩罚策略，对已经生成过的 token 施加惩罚，减少重复生成。\n",
    "\n",
    "    参数:\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "    - logits: torch.Tensor，模型的输出 logits\n",
    "\n",
    "    返回值:\n",
    "    - logits_processed: 经过重复惩罚处理后的 logits\n",
    "    \"\"\"\n",
    "    score = torch.gather(logits, 1, input_ids)  # 获取 input_ids 对应的 logits 值\n",
    "    score = torch.where(score < 0, score * repetition_penalty, score / repetition_penalty)\n",
    "    logits_processed = logits.scatter(1, input_ids, score)  # 更新 logits\n",
    "    return logits_processed\n",
    "\n",
    "\n",
    "# logits 后处理函数，依次使用重复惩罚、温度调节、Top-k 和 Top-p 策略\n",
    "def logits_wrap_process(input_ids, logits):\n",
    "    \"\"\"\n",
    "    对 logits 进行处理，依次应用重复惩罚、温度、Top-k 和 Top-p 策略。\n",
    "\n",
    "    参数:\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "    - logits: torch.Tensor，模型的输出 logits\n",
    "\n",
    "    返回值:\n",
    "    - logits_processed: 经过多种策略处理后的 logits\n",
    "    \"\"\"\n",
    "    logits_processed = repetition_penalty_logits_processor(input_ids, logits)\n",
    "    logits_processed = temperature_logits_warper(input_ids, logits_processed)\n",
    "    logits_processed = topk_logits_warper(input_ids, logits_processed)\n",
    "    logits_processed = topp_logits_warper(input_ids, logits_processed)\n",
    "    return logits_processed\n",
    "\n",
    "\n",
    "# 实现下一 token 的预测\n",
    "def predict_next_token(logits, input_ids):\n",
    "    \"\"\"\n",
    "    根据 logits 预测下一个 token，并应用采样策略。\n",
    "\n",
    "    参数:\n",
    "    - logits: torch.Tensor，模型的输出 logits\n",
    "    - input_ids: 当前输入的 token 序列\n",
    "\n",
    "    返回值:\n",
    "    - next_token_id: 预测出的下一个 token 的 ID\n",
    "    \"\"\"\n",
    "    next_token_logits = logits_wrap_process(input_ids, logits.to(torch.float32))  # 处理 logits\n",
    "    probs = torch.nn.functional.softmax(next_token_logits, dim=-1)  # 计算概率分布\n",
    "\n",
    "    # 根据概率分布进行采样，选出下一个 token 的 ID\n",
    "    next_token_id = torch.multinomial(probs, num_samples=1).squeeze(1)\n",
    "    return next_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "10b028d2-2623-4ca8-b90b-a6f0bffafb58",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "next_token_id = tensor([198])\n",
      "logits shape = torch.Size([1, 151936])\n",
      "层 0 缓存的 token 数： 20\n"
     ]
    }
   ],
   "source": [
    "# 定义输入的隐藏状态，形状为 [1, 20, 896]\n",
    "seq_length = 20\n",
    "hidden_states = torch.randn(1, seq_length, hidden_size).to(torch.bfloat16)\n",
    "input_ids = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# 实例化 KVCache，用于缓存 Key 和 Value\n",
    "kv_cache = KVCache()\n",
    "position_id = torch.arange(seq_length).reshape(1, seq_length)\n",
    "\n",
    "# 调用 my_module 进行前向计算，得到隐藏状态和 KV 缓存\n",
    "states, past_kv_cache = my_module(hidden_states, kv_cache, position_id)\n",
    "\n",
    "# 通过 lm_head 获取 logits\n",
    "logits = my_lm_head(states)\n",
    "\n",
    "# 预测下一个 token 的 ID\n",
    "next_token_id = predict_next_token(logits, input_ids)\n",
    "\n",
    "# 将预测出的 token ID 解码为实际的文本 token\n",
    "next_token = tokenizer.decode(next_token_id)\n",
    "\n",
    "# 打印结果\n",
    "print(f\"next_token_id = {next_token_id}\")\n",
    "print(f\"logits shape = {logits.shape}\")\n",
    "past_kv_cache.print(0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c0aebc8-ea6d-48c1-87c3-0b5fc69315dc",
   "metadata": {},
   "source": [
    "# Part7"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "c87ef762-fd53-45ff-9594-c74831bdcbeb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义一个函数来判断 token 是否是结束符号（EOS token）\n",
    "def is_token_eos(token_id):\n",
    "    \"\"\"\n",
    "    判断 token_id 是否是结束符号（EOS token）。\n",
    "\n",
    "    参数:\n",
    "    - token_id: int 或 list，表示当前生成的 token ID\n",
    "\n",
    "    返回值:\n",
    "    - bool，表示是否为 EOS token\n",
    "    \"\"\"\n",
    "    eos_token_id = [151645, 151643]  # 定义结束符号的 token ID\n",
    "    return token_id in eos_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "b541a3bc-41e5-427a-9b33-57fb3b7580aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "User Input: 一个星期有几天?\n",
      "\n",
      "predict next token id: tensor([117041]), next word: 一个星期\n",
      "predict next token id: tensor([18830]), next word: 有\n",
      "predict next token id: tensor([99612]), next word: 七\n",
      "predict next token id: tensor([35727]), next word: 天\n",
      "predict next token id: tensor([1773]), next word: 。\n",
      "predict next token id: tensor([109989]), next word: 这是因为\n",
      "predict next token id: tensor([102600]), next word: 星期\n",
      "predict next token id: tensor([14777]), next word: 一\n",
      "predict next token id: tensor([26939]), next word: 到\n",
      "predict next token id: tensor([102600]), next word: 星期\n",
      "predict next token id: tensor([8903]), next word: 日\n",
      "predict next token id: tensor([3837]), next word: ，\n",
      "predict next token id: tensor([105309]), next word: 一周\n",
      "predict next token id: tensor([104678]), next word: 共有\n",
      "predict next token id: tensor([22]), next word: 7\n",
      "predict next token id: tensor([35727]), next word: 天\n",
      "predict next token id: tensor([1773]), next word: 。\n",
      "\n",
      "Answer: 一个星期有七天。这是因为星期一到星期日，一周共有7天。\n"
     ]
    }
   ],
   "source": [
    "# 模拟用户输入的提示信息（prompt）\n",
    "# user_prompt = input(\"\\n我是手写的AI模型，请输入你的问题：\")\n",
    "user_prompt = \"一个星期有几天?\"\n",
    "\n",
    "# 应用聊天模板，将用户的输入转换为带模板的输入\n",
    "prompt = my_apply_chat_template(user_prompt)\n",
    "\n",
    "# 初始化 KV 缓存，用于存储和管理注意力层的 Key 和 Value\n",
    "past_key_value = KVCache()\n",
    "\n",
    "# 初始化输入的 token ID 和位置 ID\n",
    "input_ids = None\n",
    "position_id = None\n",
    "\n",
    "# 设置最大生成的 token 数量\n",
    "max_new_tokens = 20\n",
    "\n",
    "# 初始化答案文本\n",
    "answers = \"\"\n",
    "print(f\"\\n\\nUser Input: {user_prompt}\\n\")\n",
    "\n",
    "# 循环生成 token，直到生成出答案或达到最大 token 限制\n",
    "for _ in range(max_new_tokens):\n",
    "    # 将用户的输入 prompt 转换为词嵌入向量\n",
    "    prompt_ids, embeddings = my_word_embedding_process(prompt)\n",
    "\n",
    "    # 如果 input_ids 尚未初始化，则将其设置为 prompt_ids\n",
    "    input_ids = prompt_ids if input_ids is None else input_ids\n",
    "\n",
    "    # 初始化 position_id，表示 token 的位置\n",
    "    if position_id is None:\n",
    "        text_len = prompt_ids.size()[-1]\n",
    "        position_id = torch.arange(text_len).reshape(1, text_len)\n",
    "    else:\n",
    "        # 更新 position_id，表示生成的下一个 token 的位置\n",
    "        position_id = torch.tensor([[text_len]])\n",
    "        text_len += 1\n",
    "\n",
    "    # 调用模型的解码器模块，生成隐藏状态\n",
    "    states, past_key_value = my_module(embeddings, past_key_value, position_id)\n",
    "\n",
    "    # 使用 lm_head 将隐藏状态映射为 logits\n",
    "    logits = my_lm_head(states)\n",
    "\n",
    "    # 根据 logits 预测下一个 token 的 ID\n",
    "    next_token_id = predict_next_token(logits, input_ids)\n",
    "\n",
    "    # 将预测的 token ID 解码为实际的文本 token\n",
    "    next_token = tokenizer.decode(next_token_id)\n",
    "\n",
    "    # 更新 input_ids，追加生成的下一个 token\n",
    "    input_ids = torch.cat([input_ids, next_token_id[:, None]], dim=-1)\n",
    "\n",
    "    # 更新 prompt，将生成的下一个 token 作为新的输入\n",
    "    prompt = next_token\n",
    "\n",
    "    # 检查是否生成了结束符号（EOS token），如果是，则结束生成\n",
    "    if is_token_eos(next_token_id):\n",
    "        break\n",
    "\n",
    "    # 累积生成的答案\n",
    "    answers += next_token\n",
    "\n",
    "    # 打印生成的 token ID 和对应的文本 token\n",
    "    print(f\"predict next token id: {next_token_id}, next word: {next_token}\")\n",
    "\n",
    "# 打印最终生成的答案\n",
    "print(f\"\\nAnswer: {answers}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
