{"cells": [{"cell_type": "markdown", "id": "intro", "metadata": {}, "source": "# 05 Flash Attention\nFlash Attention kernel example.\n\n**NOTE:** the kernel below is a *placeholder* — it computes the elementwise sum `Out = Q + K + V` to demonstrate the Triton launch/load/store pattern. It does not implement real (flash) attention yet."}, {"cell_type": "code", "id": "kernel-and-launch", "metadata": {}, "source": "import torch\nimport triton\nimport triton.language as tl\n\n@triton.jit\ndef flash_attention_kernel(Q_ptr, K_ptr, V_ptr, Out_ptr, N, BLOCK_SIZE: tl.constexpr):\n    \"\"\"Placeholder kernel: Out[i] = Q[i] + K[i] + V[i] for i in [0, N).\n\n    Each program handles one BLOCK_SIZE-wide tile; out-of-bounds lanes are\n    masked, so N does not have to be a multiple of BLOCK_SIZE. Not real\n    attention yet -- this only demonstrates the Triton launch pattern.\n    \"\"\"\n    pid = tl.program_id(0)\n    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n    mask = offsets < N  # guard the tail tile when N % BLOCK_SIZE != 0\n    q = tl.load(Q_ptr + offsets, mask=mask)\n    k = tl.load(K_ptr + offsets, mask=mask)\n    v = tl.load(V_ptr + offsets, mask=mask)\n    tl.store(Out_ptr + offsets, q + k + v, mask=mask)  # placeholder, not attention\n\nN = 16\nBLOCK_SIZE = 16  # lanes per program; any power of two works\nQ = torch.randn(N, device='cuda')\nK = torch.randn(N, device='cuda')\nV = torch.randn(N, device='cuda')\nOut = torch.empty_like(Q)\ngrid = (triton.cdiv(N, BLOCK_SIZE),)  # one program per tile\nflash_attention_kernel[grid](Q, K, V, Out, N, BLOCK_SIZE=BLOCK_SIZE)\ntorch.testing.assert_close(Out, Q + K + V)  # sanity-check the placeholder result\nprint(\"Out =\", Out)", "outputs": [], "execution_count": null}], "metadata": {"title": "Flash Attention"}, "nbformat": 4, "nbformat_minor": 5}