import torch
import torch.nn.functional as F
from rich import print
from torch.backends.cuda import sdp_kernel     #内核计算
from enum import IntEnum
import torch.utils.benchmark as benchmark
device = "cuda" if torch.cuda.is_available() else "cpu"       # flash / mem-efficient kernels need a CUDA device

# Benchmark hyperparameters.
batch_size = 64
max_sequence_len = 256
num_heads = 32
embed_dimension = 32   # per-head feature size
dtype = torch.float16  # half precision; NOTE(review): the specialized kernels presumably expect fp16/bf16 — confirm

# Random stand-ins for the query / key / value projections,
# shaped (batch, heads, seq_len, head_dim) as taken by F.scaled_dot_product_attention.
query = torch.rand(batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype)
key = torch.rand(batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype)
value = torch.rand(batch_size, num_heads, max_sequence_len, embed_dimension, device=device, dtype=dtype)

def torch_timer(f, *args, **kwargs):
    """Return the mean wall-clock time of ``f(*args, **kwargs)`` in microseconds.

    Relies on ``torch.utils.benchmark.Timer.blocked_autorange`` to pick a
    statistically stable number of repetitions automatically.
    """
    timer = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"f": f, "args": args, "kwargs": kwargs},
    )
    measurement = timer.blocked_autorange()
    # Measurement.mean is in seconds; convert to microseconds.
    return measurement.mean * 1e6

# torch.backends.cuda also defines this enum; it is spelled out here to make the backend_map keys explicit.
class SDPBackend(IntEnum):
    """Identifiers for the scaled dot product attention backends."""

    ERROR = -1                # no usable backend
    MATH = 0                  # pure-PyTorch math implementation
    FLASH_ATTENTION = 1       # FlashAttention kernel
    EFFICIENT_ATTENTION = 2   # memory-efficient attention kernel

# Keyword-argument sets for the sdp_kernel context manager: each entry
# enables exactly one attention implementation and disables the other two.
def _only(flag):
    # Build a flag dict with a single backend switched on.
    flags = {"enable_math": False, "enable_flash": False, "enable_mem_efficient": False}
    flags[flag] = True
    return flags

backend_map = {
    SDPBackend.MATH: _only("enable_math"),                         # pure-PyTorch implementation
    SDPBackend.FLASH_ATTENTION: _only("enable_flash"),             # FlashAttention
    SDPBackend.EFFICIENT_ATTENTION: _only("enable_mem_efficient"), # memory-efficient attention
}

# Baseline: no backend forced — PyTorch picks whichever kernel it prefers.
print(f"基本对照方案 运行时间： {torch_timer(F.scaled_dot_product_attention, query, key, value):.3f} microseconds")
# sample output: 558.831 microseconds

# Force each specific kernel through the sdp_kernel context manager.
with sdp_kernel(**backend_map[SDPBackend.MATH]):
    print(f"math 运行时间： {torch_timer(F.scaled_dot_product_attention, query, key, value):.3f} microseconds")
# sample output: 1013.422 microseconds
with sdp_kernel(**backend_map[SDPBackend.FLASH_ATTENTION]):
    try:
        print(f"flash attention 运行时间： {torch_timer(F.scaled_dot_product_attention, query, key, value):.3f} microseconds")
    except RuntimeError:
        # Raised when the current device/dtype cannot use this kernel.
        print("FlashAttention is not supported")
# sample output: 557.343 microseconds
with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]):
    try:
        print(f"Memory efficient 运行时间：{torch_timer(F.scaled_dot_product_attention, query, key, value):.3f} microseconds")
    except RuntimeError:
        # Raised when the current device/dtype cannot use this kernel.
        print("EfficientAttention is not supported")
# sample output: 428.007 microseconds
