{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "88ec3461",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List, Optional, Tuple\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import math\n",
    "import time\n",
    "\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "\n",
    "class Config:\n",
    "    def __init__(self, hidden_size: int = 512, num_attention_heads: int = 8) -> None:\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "\n",
    "\n",
    "class AttentionWithoutCache(nn.Module):\n",
    "    def __init__(self, config: Config) -> None:\n",
    "        \"\"\"\n",
    "        初始化 AttentionWithoutCache 类。\n",
    "\n",
    "        参数:\n",
    "        - config: 配置类对象，包含模型的超参数，如 hidden_size 和 num_attention_heads。\n",
    "        \"\"\"\n",
    "        super().__init__()  # 调用父类 nn.Module 的初始化方法\n",
    "        self.config = config  # 保存配置对象\n",
    "        self.hidden_size = config.hidden_size  # 获取隐藏层大小\n",
    "        self.num_heads = config.num_attention_heads  # 获取注意力头的数量\n",
    "        self.head_dim = self.hidden_size // self.num_heads  # 计算每个注意力头的维度\n",
    "        assert self.head_dim * self.num_heads == self.hidden_size, \"hidden_size must be divisible by num_heads.\"  # 确保 hidden_size 能被 num_heads 整除\n",
    "\n",
    "        # 定义线性变换层\n",
    "        self.query_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)  # 查询向量投影层\n",
    "        self.key_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)  # 键向量投影层\n",
    "        self.value_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)  # 值向量投影层\n",
    "        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)  # 输出投影层\n",
    "\n",
    "    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        前向传播方法，计算多头注意力机制的输出。\n",
    "\n",
    "        参数:\n",
    "        - hidden_states: 输入张量，形状为 (batch_size, sequence_length, hidden_size)。\n",
    "\n",
    "        返回值:\n",
    "        - attn_output: 多头注意力机制的输出，形状为 (batch_size, sequence_length, hidden_size)。\n",
    "        \"\"\"\n",
    "        bsz, seq_len, _ = hidden_states.size()  # 获取输入张量的 batch_size 和 sequence_length\n",
    "\n",
    "        # 将输入张量投影为查询、键和值向量，并重塑为多头形式\n",
    "        query_states = self.query_proj(hidden_states).view(\n",
    "            bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)  # 形状: (bsz, num_heads, seq_len, head_dim)\n",
    "        key_states = self.key_proj(hidden_states).view(\n",
    "            bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)  # 形状: (bsz, num_heads, seq_len, head_dim)\n",
    "        value_states = self.value_proj(hidden_states).view(\n",
    "            bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)  # 形状: (bsz, num_heads, seq_len, head_dim)\n",
    "\n",
    "        # 计算注意力权重\n",
    "        attn_weights = torch.matmul(\n",
    "            query_states, key_states.transpose(-2, -1)) / math.sqrt(self.head_dim)  # 形状: (bsz, num_heads, seq_len, seq_len)\n",
    "        attn_weights = torch.softmax(attn_weights, dim=-1)  # 对注意力权重进行 softmax 归一化\n",
    "\n",
    "        # 计算注意力输出\n",
    "        attn_output = torch.matmul(attn_weights, value_states).transpose(\n",
    "            1, 2).reshape(bsz, seq_len, self.hidden_size)  # 形状: (bsz, seq_len, hidden_size)\n",
    "        attn_output = self.o_proj(attn_output)  # 将输出投影回原始维度\n",
    "\n",
    "        return attn_output  # 返回多头注意力机制的输出\n",
    "\n",
    "class AttentionWithCache(nn.Module):\n",
    "    def __init__(self, config: Config) -> None:\n",
    "        super().__init__()\n",
    "        self.config = config\n",
    "        self.hidden_size = config.hidden_size\n",
    "        self.num_heads = config.num_attention_heads\n",
    "        self.head_dim = self.hidden_size // self.num_heads\n",
    "        assert self.head_dim * \\\n",
    "            self.num_heads == self.hidden_size, \"hidden_size must be divisible by num_heads.\"\n",
    "\n",
    "        self.query_proj = nn.Linear(\n",
    "            self.hidden_size, self.hidden_size, bias=False)\n",
    "        self.key_proj = nn.Linear(\n",
    "            self.hidden_size, self.hidden_size, bias=False)\n",
    "        self.value_proj = nn.Linear(\n",
    "            self.hidden_size, self.hidden_size, bias=False)\n",
    "        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)\n",
    "\n",
    "    def forward(self, hidden_states: torch.Tensor, key_cache: Optional[torch.Tensor] = None, value_cache: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n",
    "        bsz, q_len, _ = hidden_states.size()\n",
    "\n",
    "        query_states = self.query_proj(hidden_states).view(\n",
    "            bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "\n",
    "        if key_cache is not None:\n",
    "            key_states = torch.cat([key_cache, self.key_proj(hidden_states).view(\n",
    "                bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)], dim=2)\n",
    "        else:\n",
    "            key_states = self.key_proj(hidden_states).view(\n",
    "                bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "\n",
    "        if value_cache is not None:\n",
    "            value_states = torch.cat([value_cache, self.value_proj(hidden_states).view(\n",
    "                bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)], dim=2)\n",
    "        else:\n",
    "            value_states = self.value_proj(hidden_states).view(\n",
    "                bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "\n",
    "        attn_weights = torch.matmul(\n",
    "            query_states, key_states.transpose(-2, -1)) / math.sqrt(self.head_dim)\n",
    "        attn_weights = torch.softmax(attn_weights, dim=-1)\n",
    "\n",
    "        attn_output = torch.matmul(attn_weights, value_states).transpose(\n",
    "            1, 2).reshape(bsz, q_len, self.hidden_size)\n",
    "        attn_output = self.o_proj(attn_output)\n",
    "\n",
    "        return attn_output, key_states, value_states\n",
    "\n",
    "\n",
    "def generate_input(bsz: int, seq_len: int, hidden_size: int) -> torch.Tensor:\n",
    "    \"\"\"生成随机的输入张量。\"\"\"\n",
    "    return torch.randn(bsz, seq_len, hidden_size)\n",
    "\n",
    "\n",
    "def test_without_cache(model: nn.Module, input_sequence: torch.Tensor, generation_length: int) -> List[torch.Tensor]:\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        generated_sequence = []\n",
    "\n",
    "        for step in range(generation_length):\n",
    "            output = model(input_sequence)[:,-1:,:]\n",
    "            generated_sequence.append(output)\n",
    "            input_sequence = torch.concat((input_sequence, output), dim=1)\n",
    "\n",
    "        return generated_sequence\n",
    "\n",
    "def test_with_cache(model: nn.Module, input_sequence: torch.Tensor, generation_length: int) -> List[torch.Tensor]:\n",
    "    \"\"\"\n",
    "    使用缓存机制测试模型，生成指定长度的序列。\n",
    "\n",
    "    参数:\n",
    "    - model: 模型对象，支持缓存机制。\n",
    "    - input_sequence: 输入序列张量，形状为 (batch_size, sequence_length, hidden_size)。\n",
    "    - generation_length: 需要生成的序列长度。\n",
    "\n",
    "    返回值:\n",
    "    - generated_sequence: 生成的序列列表，每个元素是一个张量，形状为 (batch_size, 1, hidden_size)。\n",
    "    \"\"\"\n",
    "    model.eval()  # 将模型设置为评估模式，关闭 dropout 和 batch normalization 等训练时特有的行为\n",
    "    with torch.no_grad():  # 禁用梯度计算，减少内存消耗并加速推理\n",
    "        generated_sequence = []  # 初始化生成的序列列表\n",
    "        key_cache = None  # 初始化键缓存\n",
    "        value_cache = None  # 初始化值缓存\n",
    "\n",
    "        for step in range(generation_length):  # 循环生成指定长度的序列\n",
    "            # 调用模型，获取注意力输出和更新后的键、值缓存\n",
    "            attn_output, key_cache, value_cache = model(input_sequence, key_cache, value_cache)\n",
    "            # 取注意力输出的最后一个 token，形状为 (batch_size, 1, hidden_size)\n",
    "            output = attn_output[:, -1:, :]\n",
    "            # 将生成的 token 添加到生成的序列列表中\n",
    "            generated_sequence.append(output)\n",
    "            # 将生成的 token 作为下一时间步的输入\n",
    "            input_sequence = output\n",
    "\n",
    "        return generated_sequence  # 返回生成的序列列表\n",
    "\n",
    "def main() -> None:\n",
    "    \"\"\"Benchmark cached vs. uncached attention decoding and verify they agree.\"\"\"\n",
    "    config = Config(hidden_size=512, num_attention_heads=8)\n",
    "\n",
    "    bsz = 10\n",
    "    generation_length = 10  # number of decoding steps to time\n",
    "    hidden_size = config.hidden_size\n",
    "    input_tensor = generate_input(bsz, 1, hidden_size).to(device)\n",
    "\n",
    "    # Build both layers and share one set of weights so their outputs are\n",
    "    # directly comparable. (Previously this loaded 'model.pth', a file the\n",
    "    # notebook never creates, so a fresh run would crash.)\n",
    "    attention_layer_with_cache = AttentionWithCache(config).to(device)\n",
    "    attention_layer_without_cache = AttentionWithoutCache(config).to(device)\n",
    "    attention_layer_without_cache.load_state_dict(attention_layer_with_cache.state_dict())\n",
    "\n",
    "    # Timed run with the KV cache.\n",
    "    if device == 'cuda':\n",
    "        torch.cuda.synchronize()  # drain pending kernels for a fair timing\n",
    "    start_time = time.time()\n",
    "    with_cache_outputs = test_with_cache(attention_layer_with_cache, input_tensor, generation_length)\n",
    "    if device == 'cuda':\n",
    "        torch.cuda.synchronize()\n",
    "    time_with_cache = time.time() - start_time\n",
    "    print(f\"Time with cache: {time_with_cache:.6f} seconds.\")\n",
    "\n",
    "    # Timed run without the cache.\n",
    "    start_time = time.time()\n",
    "    without_cache_outputs = test_without_cache(attention_layer_without_cache, input_tensor, generation_length)\n",
    "    if device == 'cuda':\n",
    "        torch.cuda.synchronize()\n",
    "    time_without_cache = time.time() - start_time\n",
    "    print(f\"Time without cache: {time_without_cache:.6f} seconds.\")\n",
    "\n",
    "    if time_with_cache < time_without_cache:\n",
    "        print(\"Using cache is faster.\")\n",
    "    else:\n",
    "        print(\"Using cache did not improve performance.\")\n",
    "\n",
    "    # Both decoding strategies must generate numerically identical tokens.\n",
    "    for without_cache_output, with_cache_output in zip(without_cache_outputs, with_cache_outputs):\n",
    "        assert torch.allclose(with_cache_output, without_cache_output, atol=0.001)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorchgpu",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
