import os
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice

# Choose the elementwise math primitives once at import time. Setting the
# FLA_USE_FAST_OPS environment variable to '1' opts into libdevice's fast
# (lower-precision) variants; otherwise the standard Triton ops are used.
_FAST_OPS = os.environ.get('FLA_USE_FAST_OPS', '0') == '1'

exp = tldevice.fast_expf if _FAST_OPS else tl.exp
exp2 = tldevice.exp2 if _FAST_OPS else tl.math.exp2
log = tldevice.fast_logf if _FAST_OPS else tl.log
log2 = tldevice.fast_log2f if _FAST_OPS else tl.log2

@triton.heuristics({
    'HAS_SCALE': lambda args: args['scale'] is not None,
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None
})
@triton.jit(do_not_specialize=['T'])
def chunk_local_cumsum_vector_kernel(
    s,
    o,
    scale,
    cu_seqlens,
    chunk_indices,
    T,
    B: tl.constexpr,
    H: tl.constexpr,
    S: tl.constexpr,
    BT: tl.constexpr,
    BS: tl.constexpr,
    REVERSE: tl.constexpr,
    HAS_SCALE: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    HEAD_FIRST: tl.constexpr,
):
    """Inclusive cumulative sum of `s` along the time axis, restarted at every
    BT-long chunk, written to `o`.

    Each program handles one [BT, BS] tile for one head: axis 0 indexes the
    feature block, axis 1 the time chunk, axis 2 the flattened (batch, head)
    pair. The cumsum is realized as a [BT, BT] triangular-mask matmul:
    forward uses a lower-triangular mask, REVERSE an upper-triangular one
    (both keep the diagonal, making the sum inclusive).

    Args:
        s: pointer to the input tensor.
        o: pointer to the output tensor (same layout/shape as `s`).
        scale: optional scalar multiplier applied after the cumsum;
            HAS_SCALE is derived from `scale is not None` by the heuristic.
        cu_seqlens: optional int32 prefix sums of sequence lengths for
            variable-length batches; IS_VARLEN is derived from it.
        chunk_indices: int32 pairs (sequence idx, chunk idx within sequence),
            one per launched chunk; only read when IS_VARLEN.
        T: time length per batch element; overwritten with the per-sequence
            length when IS_VARLEN (hence `do_not_specialize`).
        B, H, S: batch size, head count, feature width.
        BT, BS: tile sizes along the time and feature axes.
        REVERSE: accumulate from the end of each chunk instead of the start.
        HEAD_FIRST: selects between the two base-offset/stride formulas
            below. NOTE(review): the HEAD_FIRST branch assumes a
            head-contiguous (.., H, T, S) layout — confirm against callers.
    """
    # Program ids: feature block, time chunk, flattened (batch, head).
    i_s, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Remap the global chunk id to (sequence, chunk-within-sequence) and
        # pull this sequence's [bos, eos) time span from the prefix sums.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T

    o_i = tl.arange(0, BT)
    # Triangular mask: m_s[i, j] == 1 exactly when position j contributes to
    # the running sum at position i.
    if REVERSE:
        m_s = tl.where(o_i[:, None] <= o_i[None, :], 1., 0.)
    else:
        m_s = tl.where(o_i[:, None] >= o_i[None, :], 1., 0.)

    if HEAD_FIRST:
        p_s = tl.make_block_ptr(s + (bos * H + i_h*T)*S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
        p_o = tl.make_block_ptr(o + (bos * H + i_h*T)*S, (T, S), (S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
    else:
        # (.., T, H, S) layout: consecutive time steps are H*S elements apart.
        p_s = tl.make_block_ptr(s + (bos * H + i_h) * S, (T, S), (H*S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
        p_o = tl.make_block_ptr(o + (bos * H + i_h) * S, (T, S), (H*S, 1), (i_t * BT, i_s * BS), (BT, BS), (1, 0))
    # [BT, BS] tile; accumulate in fp32 regardless of the storage dtype.
    b_s = tl.load(p_s, boundary_check=(0, 1)).to(tl.float32)
    b_o = tl.dot(m_s, b_s, allow_tf32=False)
    if HAS_SCALE:
        b_o *= scale
    tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))

def test_chunk_local_cumsum_vector_kernel(device='npu'):
    """Smoke-test the varlen path of chunk_local_cumsum_vector_kernel.

    Builds a flattened (1, total_T, H, S) input, a valid monotonically
    increasing `cu_seqlens`, and the matching (sequence, chunk) index pairs,
    launches the kernel once, and checks the result against a per-chunk
    cumsum reference computed with plain PyTorch.

    Args:
        device: torch device string for a Triton-capable backend
            (defaults to 'npu'; use 'cuda' on NVIDIA GPUs).
    """
    torch.manual_seed(42)

    # Problem sizes.
    H = 8    # number of heads
    S = 32   # feature dimension
    BT = 16  # chunk (block) size along time
    BS = 8   # block size along the feature dimension

    # Two variable-length sequences flattened along the time axis, as the
    # kernel's IS_VARLEN path expects.
    seqlens = [40, 24]
    total_T = sum(seqlens)
    dtype = torch.float32

    # Varlen layout: (1, total_T, H, S); the kernel's HEAD_FIRST=False branch
    # addresses exactly this (time-major, head-minor) memory order.
    s = torch.randn(1, total_T, H, S, dtype=dtype, device=device)
    o = torch.empty_like(s)
    scale = 0.5

    # cu_seqlens must be the monotonically increasing prefix sums of the
    # sequence lengths (the original random tensor was unsorted and invalid).
    offsets = [0]
    for length in seqlens:
        offsets.append(offsets[-1] + length)
    cu_seqlens = torch.tensor(offsets, dtype=torch.int32, device=device)

    # chunk_indices holds flattened (sequence idx, chunk idx) pairs, one per
    # BT-sized chunk, matching the kernel's `chunk_indices + i_t * 2` lookup.
    pairs = []
    for n, length in enumerate(seqlens):
        for i_t in range(triton.cdiv(length, BT)):
            pairs.append((n, i_t))
    chunk_indices = torch.tensor(pairs, dtype=torch.int32, device=device).flatten()

    # Grid: feature blocks x total chunks x heads. The batch is folded into
    # the varlen time axis, so axis 2 only needs H programs.
    grid = (triton.cdiv(S, BS), len(pairs), H)

    chunk_local_cumsum_vector_kernel[grid](
        s, o, scale,
        cu_seqlens, chunk_indices, total_T,
        B=1, H=H, S=S, BT=BT, BS=BS,
        REVERSE=False, HAS_SCALE=True,
        IS_VARLEN=True, HEAD_FIRST=False,
    )

    # Reference: inclusive cumsum restarted at every BT-chunk boundary,
    # scaled by `scale`.
    ref = torch.empty_like(s)
    for n, length in enumerate(seqlens):
        bos = offsets[n]
        for start in range(0, length, BT):
            end = min(start + BT, length)
            blk = s[0, bos + start:bos + end]
            ref[0, bos + start:bos + end] = blk.cumsum(dim=0) * scale
    torch.testing.assert_close(o, ref, rtol=1e-4, atol=1e-4)

if __name__ == "__main__":
    test_chunk_local_cumsum_vector_kernel()