import os
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice

# Select the math primitives once at import time. With FLA_USE_FAST_OPS=1 the
# faster, lower-precision libdevice intrinsics are used; otherwise the
# numerically safer Triton builtins are the default.
if os.environ.get('FLA_USE_FAST_OPS', '0') == '1':
    exp = tldevice.fast_expf
    exp2 = tldevice.exp2  # NOTE(review): no fast_* variant used for exp2 — confirm intended
    log = tldevice.fast_logf
    log2 = tldevice.fast_log2f
else:
    exp = tl.exp
    exp2 = tl.math.exp2
    log = tl.log
    log2 = tl.log2

@triton.heuristics({
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None
})
@triton.jit(do_not_specialize=['T'])
def chunk_scaled_dot_kkt_fwd_kernel_intra_sub_inter(
    k,
    g,
    beta,
    A,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    BT: tl.constexpr,
    BC: tl.constexpr,
    BK: tl.constexpr,
    NC: tl.constexpr,
    IS_VARLEN: tl.constexpr,
):
    """Compute the strictly-lower sub-chunk tiles of A = beta * (K @ K^T)
    with per-feature log-space gating g, inside each BT x BT chunk.

    Each program handles one [BC, BC] tile (i_i, i_j) with i_i > i_j of one
    chunk for one (batch, head) pair; tiles on or above the sub-chunk
    diagonal are left untouched by this kernel.

    Args (device pointers unless noted):
        k: keys, laid out [B, T, H, K] flattened (or [total_T, H, K] varlen).
        g: log-space gates, same layout as ``k``.
        beta: per-token scale, [B, T, H].
        A: output, [B, T, H, BT]; only sub-diagonal tiles are written.
        cu_seqlens: cumulative sequence lengths for varlen, or None.
        chunk_indices: flat (seq_idx, chunk_idx) int32 pairs, used iff varlen.
        T: per-batch sequence length (overridden per sequence when varlen).
        H, K, BT, BC, BK, NC: head count, key dim, tile sizes, and
            sub-chunks per chunk (constexpr).
    """
    i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    # Decompose the flat sub-chunk pair index: i_i is the destination (row)
    # sub-chunk, i_j the source (column) sub-chunk within the chunk.
    i_i, i_j = i_c // NC, i_c % NC
    if IS_VARLEN:
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T

    # Row tile starts past the end of this sequence: nothing to do.
    if i_t * BT + i_i * BC >= T:
        return
    # Only strictly-lower tiles (i_i > i_j) are produced by this kernel.
    if i_i <= i_j:
        return

    # Shift base pointers to this (sequence, head) slice.
    k += (bos * H + i_h) * K
    g += (bos * H + i_h) * K
    A += (bos * H + i_h) * BT

    p_beta = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t * BT + i_i * BC,), (BC,), (0,))
    b_beta = tl.load(p_beta, boundary_check=(0,))

    b_A = tl.zeros([BC, BC], dtype=tl.float32)
    for i_k in range(tl.cdiv(K, BK)):
        p_k = tl.make_block_ptr(k, (T, K), (H*K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
        p_g = tl.make_block_ptr(g, (T, K), (H*K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0))
        # Renamed from ``b_kt``: this is a block *pointer* (p_*), not a loaded
        # block value (b_*); the original name was then shadowed by the load.
        p_kt = tl.make_block_ptr(k, (K, T), (1, H*K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))
        p_gk = tl.make_block_ptr(g, (K, T), (1, H*K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1))

        o_k = i_k * BK + tl.arange(0, BK)
        m_k = o_k < K
        # [BK,] gate at the first row of the destination sub-chunk; both
        # operands are rescaled relative to it so the exponents stay bounded.
        b_gn = tl.load(g + (i_t * BT + i_i * BC) * H*K + o_k, mask=m_k, other=0)
        # [BC, BK]
        b_g = tl.load(p_g, boundary_check=(0, 1))
        b_k = tl.load(p_k, boundary_check=(0, 1)) * exp(b_g - b_gn[None, :])
        # [BK, BC]
        b_gk = tl.load(p_gk, boundary_check=(0, 1))
        b_kt = tl.load(p_kt, boundary_check=(0, 1)) * exp(b_gn[:, None] - b_gk)
        # [BC, BC] accumulate the gated dot product over K tiles.
        b_A += tl.dot(b_k, b_kt)
    b_A *= b_beta[:, None]

    p_A = tl.make_block_ptr(A, (T, BT), (H*BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0))
    tl.store(p_A, b_A.to(A.dtype.element_ty), boundary_check=(0, 1))

def test_chunk_scaled_dot_kkt_fwd_kernel_intra_sub_inter():
    """Smoke-test the intra-sub-inter kernel with a valid varlen setup.

    Builds a sorted ``cu_seqlens``, matching ``(seq_idx, chunk_idx)`` pairs
    in ``chunk_indices``, and a consistent grid, then launches the kernel
    once. Success means the launch completes without error.
    """
    # Fixed seed for reproducibility.
    torch.manual_seed(42)

    # Problem sizes.
    B = 2        # batch size
    T = 64       # sequence length
    H = 8        # number of heads
    K = 32       # key/value feature dimension
    BT = 16      # tile size along T (chunk length)
    BC = 4       # sub-chunk size
    BK = 8       # tile size along K
    # NC must be the number of BC sub-chunks per BT chunk; the original
    # hard-coded NC=2 disagreed with BT//BC=4, letting i_i index past the
    # chunk and write tiles outside the intended region.
    NC = BT // BC

    # Pick the first available accelerator; fall back to CPU so the test
    # does not crash on machines without an NPU.
    if hasattr(torch, 'npu') and torch.npu.is_available():
        device = 'npu'
    elif torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    dtype = torch.float16

    # Input tensors.
    k = torch.randn(B, T, H, K, dtype=dtype, device=device)
    g = torch.randn(B, T, H, K, dtype=dtype, device=device)
    beta = torch.randn(B, T, H, dtype=dtype, device=device)
    A = torch.randn(B, T, H, BT, dtype=dtype, device=device)

    # cu_seqlens must be non-decreasing with endpoints [0, T]; the original
    # unsorted randint draw could make T = eos - bos negative in the kernel.
    cu_seqlens = torch.randint(low=0, high=T, size=(B + 1,), dtype=torch.int32, device=device)
    cu_seqlens, _ = torch.sort(cu_seqlens)
    cu_seqlens[0] = 0
    cu_seqlens[-1] = T

    # chunk_indices holds flat (sequence index, chunk index) int32 pairs
    # covering every BT chunk of every sequence, as the IS_VARLEN path
    # expects; random values would make the kernel read garbage offsets.
    pairs = []
    seq_lens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    for i_n, seq_len in enumerate(seq_lens):
        for i_c in range(triton.cdiv(seq_len, BT)):
            pairs.append((i_n, i_c))
    chunk_indices = torch.tensor(pairs, dtype=torch.int32, device=device).flatten()

    # Grid: one program per (chunk, sub-chunk pair, head). The batch index
    # is unused in the varlen path, so axis 2 only spans heads.
    grid = (len(pairs), NC * NC, H)

    # IS_VARLEN is also derived by the @triton.heuristics decorator from
    # cu_seqlens being non-None; passed explicitly to match the original call.
    chunk_scaled_dot_kkt_fwd_kernel_intra_sub_inter[grid](
        k, g, beta, A,
        cu_seqlens, chunk_indices, T,
        H=H, K=K, BT=BT, BC=BC, BK=BK, NC=NC,
        IS_VARLEN=True
    )

# Run the smoke test when this file is executed as a script.
if __name__ == "__main__":
    test_chunk_scaled_dot_kkt_fwd_kernel_intra_sub_inter()