{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3.4.0\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import triton\n",
    "from triton_flash_attn import triton_flash_attn_varlen_func\n",
    "from flash_attn import flash_attn_varlen_func\n",
    "import random\n",
    "from copy import deepcopy\n",
    "\n",
    "import os\n",
    "os.environ['TRITON_PRINT_AUTOTUNING'] = '1'\n",
    "\n",
    "def compare(x, y, prefix=\"\"):\n",
    "    if x is None or y is None:\n",
    "        return\n",
    "    if any([x.dtype == torch.float32, y.dtype==torch.float32]):\n",
    "        x,y = x.float(), y.float()\n",
    "    diff = (x-y).abs()\n",
    "    if prefix:\n",
    "        print(prefix, end=\": \")\n",
    "    print(f\"最大差异: {diff.max().item()}, 平均差异: {diff.mean().item()}\")\n",
    "\n",
    "def custom_flash_attn_varlen_func(q, k, v, cu_seqlens, max_len, causal=True, window_size=-1):\n",
    "    d = q.size(-1)\n",
    "    vd = v.size(-1)\n",
    "    if d != vd:\n",
    "        v = torch.nn.functional.pad(v, (0, q.size(-1) - v.size(-1)))\n",
    "    o = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens, max_len, max_len, causal=causal, window_size=window_size, return_attn_probs=False)\n",
    "    if d != vd:\n",
    "        o = o[..., :vd].contiguous()\n",
    "    return o\n",
    "\n",
    "print(triton.__version__)\n",
    "# 建议最新版，3.2肯定不行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "device = \"cuda\"\n",
    "dtype = torch.bfloat16\n",
    "\n",
    "def alloc_fn(size: int, align: int, _):\n",
    "    return torch.empty(size, dtype=torch.int8, device=device)\n",
    "triton.set_allocator(alloc_fn)\n",
    "\n",
    "b, qh, kh, d = 4, 32, 8, 128\n",
    "off = 64\n",
    "splits = [random.randint(4096, 8192) for _ in range(b)]\n",
    "splits = [1024 * 8] * 2\n",
    "t = sum(splits)\n",
    "cu_seqlens = torch.tensor([0] + splits, device=device).cumsum(0).to(torch.int32)\n",
    "max_len = max(splits)\n",
    "\n",
    "q1 = torch.randn(t, qh, d+off, device=device, dtype=dtype).requires_grad_(True)\n",
    "k1 = torch.randn(t, kh, d+off, device=device, dtype=dtype).requires_grad_(True)\n",
    "v1 = torch.randn(t, kh, d, device=device, dtype=dtype).requires_grad_(True)\n",
    "q2, k2, v2 = deepcopy(q1), deepcopy(k1), deepcopy(v1)\n",
    "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最大差异: 0.00390625, 平均差异: 0.0001087188720703125\n",
      "最大差异: 0.0078125, 平均差异: 3.218650817871094e-05\n",
      "最大差异: 0.03125, 平均差异: 0.0001735687255859375\n",
      "最大差异: 0.03125, 平均差异: 0.00019168853759765625\n"
     ]
    }
   ],
   "source": [
    "window_size = (512, -1)\n",
    "causal = True\n",
    "o1, lse = triton_flash_attn_varlen_func(q1, k1, v1, cu_seqlens, max_len, causal=causal, window_size=window_size)\n",
    "o1\n",
    "o2 = custom_flash_attn_varlen_func(q2, k2, v2, cu_seqlens, max_len, causal=causal, window_size=window_size)\n",
    "compare(o1, o2)\n",
    "# compare(lse.transpose(0, 1), lse2 * 1.44)\n",
    "\n",
    "do = torch.randn_like(o1)\n",
    "o1.backward(do)\n",
    "o2.backward(do)\n",
    "compare(q1.grad, q2.grad)\n",
    "compare(k1.grad, k2.grad)\n",
    "compare(v1.grad, v2.grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.09254133275577\n",
      "1.1511510169064556\n",
      "2.6635030061006546\n",
      "3.3410962952507868\n"
     ]
    }
   ],
   "source": [
    "print(triton.testing.do_bench(lambda: triton_flash_attn_varlen_func(q1, k1, v1, cu_seqlens, max_len, causal=True, window_size=window_size)))\n",
    "print(triton.testing.do_bench(lambda: custom_flash_attn_varlen_func(q2, k2, v2, cu_seqlens, max_len, causal=True, window_size=window_size)))\n",
    "o1, lse = triton_flash_attn_varlen_func(q1, k1, v1, cu_seqlens, max_len, causal=True, window_size=window_size)\n",
    "o2 = custom_flash_attn_varlen_func(q2, k2, v2, cu_seqlens, max_len, causal=True, window_size=window_size)\n",
    "do = torch.randn_like(o1)\n",
    "\n",
    "print(triton.testing.do_bench(lambda: o1.backward(do, retain_graph=True), grad_to_none=[q1, k1, v1]))\n",
    "print(triton.testing.do_bench(lambda: o2.backward(do, retain_graph=True), grad_to_none=[q2, k2, v2]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Toy indices for visualizing the sliding-window causal mask in the next cell.\n",
     "q_idx = torch.arange(8)\n",
     "k_idx = torch.arange(8)\n",
     "w = 2  # window width: each query sees at most the w most recent keys (self inclusive)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ True, False, False, False, False, False, False, False],\n",
       "        [ True,  True, False, False, False, False, False, False],\n",
       "        [False,  True,  True, False, False, False, False, False],\n",
       "        [False, False,  True,  True, False, False, False, False],\n",
       "        [False, False, False,  True,  True, False, False, False],\n",
       "        [False, False, False, False,  True,  True, False, False],\n",
       "        [False, False, False, False, False,  True,  True, False],\n",
       "        [False, False, False, False, False, False,  True,  True]])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sliding-window causal mask: key j is visible to query i iff i - w < j <= i,\n",
     "# i.e. each query attends to itself plus the previous w-1 keys (see output below).\n",
     "mask = (q_idx[:, None] >= k_idx[None, :]) & (q_idx[:, None] - w < k_idx[None, :])\n",
     "mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
