import os
import inspect
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice

# Select the math primitives used by the Triton kernels in this module.
# Setting FLA_USE_FAST_OPS=1 swaps in libdevice fast approximations
# (lower precision, higher throughput); otherwise the standard
# tl / tl.math implementations are used.
# NOTE(review): the fast path aliases exp2 to plain tldevice.exp2 rather
# than a fast_* variant — presumably intentional; confirm against the
# libdevice API surface.
_use_fast_ops = os.environ.get('FLA_USE_FAST_OPS', '0') == '1'
exp = tldevice.fast_expf if _use_fast_ops else tl.exp
exp2 = tldevice.exp2 if _use_fast_ops else tl.math.exp2
log = tldevice.fast_logf if _use_fast_ops else tl.log
log2 = tldevice.fast_log2f if _use_fast_ops else tl.log2

# Candidate warp counts for the (currently disabled) autotune configs below.
NUM_WARPS = [2, 4, 8]
# Whether triton.autotune should persist its results between runs
# (opt-out via FLA_CACHE_RESULTS=0).
FLA_CACHE_RESULTS = os.getenv('FLA_CACHE_RESULTS', '1') == '1'

# Older Triton releases lack the `cache_results` keyword on triton.autotune;
# probe the signature and only forward the kwarg when it is supported.
_autotune_params = inspect.signature(triton.autotune).parameters
supports_autotune_cache = 'cache_results' in _autotune_params
if supports_autotune_cache:
    autotune_cache_kwargs = {'cache_results': FLA_CACHE_RESULTS}
else:
    autotune_cache_kwargs = {}

@triton.heuristics({
    'USE_G': lambda args: args['g'] is not None,
    'USE_G_GAMMA': lambda args: args['g_gamma'] is not None,
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
})
# @triton.autotune(
#     configs=[
#         triton.Config({}, num_warps=num_warps, num_stages=num_stages)
#         for num_warps in NUM_WARPS
#         for num_stages in [2, 3, 4]
#     ],
#     key=['H', 'K', 'V', 'BT', 'BK', 'BV', 'USE_G'],
#     **autotune_cache_kwargs
# )
@triton.jit(do_not_specialize=['T'])
def chunk_bwd_kernel_dv_local(
    q,              # queries; contiguous [B, T, H, K] (or packed varlen [1, total_T, H, K])
    k,              # keys; same layout as q
    g,              # optional per-position log-decay [B, T, H]; None disables USE_G
    g_gamma,        # optional per-head decay rate [H]; None disables USE_G_GAMMA
    do,             # gradient wrt output; [B, T, H, V]
    dv,             # output: gradient wrt values; [B, T, H, V] (intra-chunk part)
    cu_seqlens,     # varlen only: cumulative sequence offsets [N+1]
    chunk_indices,  # varlen only: flattened (seq_idx, chunk_idx) pairs, one per program on axis 0
    scale,          # scalar attention scale applied to the score matrix
    T,              # sequence length; overridden per-sequence in varlen mode
    H: tl.constexpr,
    K: tl.constexpr,
    V: tl.constexpr,
    BT: tl.constexpr,
    BK: tl.constexpr,
    BV: tl.constexpr,
    USE_G: tl.constexpr,
    USE_G_GAMMA: tl.constexpr,
    IS_VARLEN: tl.constexpr,
):
    """Intra-chunk backward pass for dv.

    For each time chunk, forms the causal score block
        A[i, j] = (k_i . q_j) * decay(i, j) * scale   for j >= i within the chunk
    and accumulates dv = A @ do over value blocks. Grid: (num_chunks, B*H).
    """
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Decode (sequence, chunk) for this program and the sequence span.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T

    # Advance base pointers to (sequence start, head) for the [*, H, D] layout.
    q += (bos * H + i_h) * K
    k += (bos * H + i_h) * K
    do += (bos * H + i_h) * V
    dv += (bos * H + i_h) * V

    # Accumulate A = K Q^T over K-dim blocks in fp32.
    b_A = tl.zeros([BT, BT], dtype=tl.float32)
    for i_k in range(tl.cdiv(K, BK)):
        p_k = tl.make_block_ptr(k, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_q = tl.make_block_ptr(q, (K, T), (1, H*K), (i_k * BK, i_t * BT), (BK, BT), (0, 1))
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_A += tl.dot(b_k, b_q)

    if USE_G:
        # Per-position log-decay for this head, one value per row of the chunk.
        g += bos * H + i_h
        p_g = tl.make_block_ptr(g, (T,), (H,), (i_t * BT,), (BT,), (0,))
        b_g = tl.load(p_g, boundary_check=(0,))
    if USE_G_GAMMA:
        # Constant per-head rate: cumulative log-decay grows linearly in-chunk.
        b_gamma = tl.load(g_gamma + i_h)
        b_g = b_gamma * (tl.arange(0, BT) + 1)

    o_t = i_t * BT + tl.arange(0, BT)
    m_t = o_t < T
    # Keep A[i, j] only for j >= i (causal, transposed score) and in-bounds rows/cols.
    m_A = (o_t[:, None] <= o_t[None, :]) & (m_t[:, None] & m_t)
    # BUG FIX: the decay must also be applied when only USE_G_GAMMA is set;
    # previously b_g was computed for the gamma path but never used.
    if USE_G or USE_G_GAMMA:
        b_A = tl.where(m_A, b_A * exp(b_g[None, :] - b_g[:, None]) * scale, 0).to(do.dtype.element_ty)
    else:
        b_A = tl.where(m_A, b_A * scale, 0).to(do.dtype.element_ty)

    # dv = A @ do, streamed over V-dim blocks.
    for i_v in range(tl.cdiv(V, BV)):
        p_do = tl.make_block_ptr(do, (T, V), (H*V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        p_dv = tl.make_block_ptr(dv, (T, V), (H*V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        b_do = tl.load(p_do, boundary_check=(0, 1))
        b_dv = tl.dot(b_A.to(b_do.dtype), b_do)
        tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))

def test_chunk_bwd_kernel_dv_local():
    """Smoke-test the kernel launch in varlen mode with well-formed metadata.

    Packs B sequences of length T into a flat [1, B*T, H, D] layout, builds
    monotonic `cu_seqlens` and matching `chunk_indices`, launches one program
    per (chunk, head), and checks the output is finite.
    """
    # Fixed seed for reproducibility.
    torch.manual_seed(42)

    # Problem sizes.
    B = 2       # number of (packed) sequences
    T = 16      # length of each sequence
    H = 8       # number of heads
    K = 64      # key dimension
    V = 64      # value dimension
    BT = 16     # block size along T
    BK = 16     # block size along K
    BV = 16     # block size along V

    device = 'npu'  # NOTE(review): targets Ascend NPU; use 'cuda' on NVIDIA hardware.
    dtype = torch.float32

    # Input tensors, contiguous [B, T, H, D]; in varlen mode the kernel treats
    # the memory as a packed [B*T, H, D] stream addressed via cu_seqlens.
    q = torch.randn(B, T, H, K, dtype=dtype, device=device)
    k = torch.randn(B, T, H, K, dtype=dtype, device=device)
    g = torch.randn(B, T, H, dtype=dtype, device=device)
    g_gamma = torch.randn(H, dtype=dtype, device=device)
    do = torch.randn(B, T, H, V, dtype=dtype, device=device)
    dv = torch.zeros(B, T, H, V, dtype=dtype, device=device)  # output buffer

    # Varlen metadata. cu_seqlens must start at 0 and be non-decreasing
    # (the kernel computes T = eos - bos per sequence): [0, T, 2T, ...].
    cu_seqlens = torch.arange(0, (B + 1) * T, T, dtype=torch.int32, device=device)
    # chunk_indices holds one (sequence index, chunk index) pair per program
    # on grid axis 0, stored flattened.
    pairs = []
    for n in range(B):
        seq_len = int(cu_seqlens[n + 1]) - int(cu_seqlens[n])
        for t in range(triton.cdiv(seq_len, BT)):
            pairs.append((n, t))
    chunk_indices = torch.tensor(pairs, dtype=torch.int32, device=device).flatten()

    # Conventional 1/sqrt(K) attention scaling.
    scale = K ** -0.5

    # One program per chunk on axis 0; axis 1 covers the heads of the packed
    # batch (batch is folded into T for varlen, so axis 1 is just H).
    grid = (len(pairs), H)

    # USE_G / USE_G_GAMMA / IS_VARLEN are derived by @triton.heuristics from
    # whether g / g_gamma / cu_seqlens are None, so they are not passed here.
    chunk_bwd_kernel_dv_local[grid](
        q, k, g, g_gamma, do, dv,
        cu_seqlens, chunk_indices, scale, T,
        H=H, K=K, V=V, BT=BT, BK=BK, BV=BV,
    )

    assert torch.isfinite(dv).all(), "kernel produced non-finite values"

# Allow running this file directly as a standalone smoke test.
if __name__ == "__main__":
    test_chunk_bwd_kernel_dv_local()