import os
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice
import argparse
import numpy as np

   
@triton.heuristics({
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
})
@triton.jit(do_not_specialize=['T'])
def chunk_scaled_dot_kkt_fwd_kernel_intra_sub_intra(
    k,
    g,
    beta,
    A,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    BT: tl.constexpr,
    BC: tl.constexpr,
    BK: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    USE_FAST_OPS: tl.constexpr,
):
    """Forward kernel: beta-scaled, gated K·K^T within one BC-sized sub-chunk.

    For each row position ``i`` inside a sub-chunk of size ``BC`` this program
    writes ``A[i, j] = sum_k beta[i] * k[i, k] * k[j, k] * exp(g[i, k] - g[j, k])``
    for ``j < i`` (strictly lower-triangular; other entries are stored as 0).

    Args:
        k: key tensor, laid out so row ``t`` of head ``h`` starts at
           ``(bos*H + h)*K`` with row stride ``H*K`` (flattened [T, H, K]).
        g: gating tensor with the same layout as ``k``.
        beta: per-position scale, laid out with stride ``H`` per time step.
        A: output buffer; each row holds ``BT`` columns per head
           (offset ``(bos + t)*H*BT + h*BT``).
        cu_seqlens: cumulative sequence lengths for varlen mode, or None.
        chunk_indices: pairs ``(seq_idx, chunk_idx)`` per T-block in varlen mode.
        T: sequence length (runtime value; not specialized).
        H, K: number of heads / key dimension.
        BT, BC, BK: block sizes along T, sub-chunk, and K respectively.
        IS_VARLEN: derived heuristic — true iff ``cu_seqlens`` is given.
        USE_FAST_OPS: use libdevice fast exp instead of ``tl.exp``.
    """
    # Program ids: T-block, sub-chunk within the block, flattened (batch, head).
    i_t, i_i, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Varlen: remap (i_t) through chunk_indices to a (sequence, chunk) pair,
        # then read that sequence's [bos, eos) range from the prefix sums.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T

    # Whole sub-chunk lies past the end of this sequence: nothing to do.
    if i_t * BT + i_i * BC >= T:
        return

    o_i = tl.arange(0, BC)
    o_k = tl.arange(0, BK)
    # Masks: valid K lanes and valid (in-sequence) rows of this sub-chunk.
    m_k = o_k < K
    m_A = (i_t * BT + i_i * BC + o_i) < T
    # Output offsets: row base for each of the BC rows; column j is added later.
    o_A = (bos + i_t * BT + i_i * BC + o_i) * H*BT + i_h * BT + i_i * BC

    p_k = tl.make_block_ptr(k + (bos * H + i_h) * K, (T, K), (H*K, 1), (i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
    p_g = tl.make_block_ptr(g + (bos * H + i_h) * K, (T, K), (H*K, 1), (i_t * BT + i_i * BC, 0), (BC, BK), (1, 0))
    p_beta = beta + (bos + i_t * BT + i_i * BC + o_i) * H + i_h

    # Pre-scale every row of K by its beta; masked rows contribute 0.
    b_k = tl.load(p_k, boundary_check=(0, 1)) * tl.load(p_beta, mask=m_A, other=0)[:, None]
    b_g = tl.load(p_g, boundary_check=(0, 1))

    # Walk column j over the (clamped) sub-chunk; p_kt/p_gk advance one time
    # step (H*K elements) per iteration.
    p_kt = k + (bos + i_t * BT + i_i * BC) * H*K + i_h * K + o_k
    p_gk = g + (bos + i_t * BT + i_i * BC) * H*K + i_h * K + o_k
    for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
        b_kt = tl.load(p_kt, mask=m_k, other=0).to(tl.float32)
        b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32)
        # b_A = tl.sum(b_k * b_kt[None, :] * exp(b_g - b_gk[None, :]), 1)
        if USE_FAST_OPS:
            b_A = tl.sum(b_k * b_kt[None, :] * tldevice.fast_expf(b_g - b_gk[None, :]), 1)
        else:
            b_A = tl.sum(b_k * b_kt[None, :] * tl.exp(b_g - b_gk[None, :]), 1)
        # Keep only the strictly-lower-triangular part (row index > column j).
        b_A = tl.where(o_i > j, b_A, 0.)

        tl.store(A + o_A + j, b_A, mask=m_A)
        p_kt += H*K
        p_gk += H*K

def test_chunk_scaled_dot_kkt_fwd_kernel_intra_sub_intra(output_file):
    """Launch the intra-sub-intra KK^T kernel on random inputs and dump A.

    Args:
        output_file: path the flattened output tensor ``A`` is written to
            via ``np.savetxt``.
    """
    # Fix the random seed for reproducibility.
    torch.manual_seed(42)

    # Problem sizes.
    B = 2        # batch size
    T = 64       # sequence length
    H = 8        # number of heads
    K = 32       # key/value dimension
    BT = 16      # block size along T
    BC = 4       # sub-chunk block size
    BK = 8       # block size along K

    device = 'npu'  # NPU device — assumes torch_npu backend is installed; TODO confirm
    dtype = torch.float16

    # Random input tensors.
    k = torch.randn(B, T, H, K, dtype=dtype, device=device)
    g = torch.randn(B, T, H, K, dtype=dtype, device=device)
    beta = torch.randn(B, T, H, dtype=dtype, device=device)
    A = torch.randn(B, T, H, BT, dtype=dtype, device=device)
    # cu_seqlens must be a non-decreasing prefix-sum array (kernel computes
    # T = eos - bos per sequence); an unsorted randint draw can yield a
    # negative length, so sort the interior points before pinning the ends.
    cu_seqlens = torch.randint(low=0, high=T, size=(B + 1,), dtype=torch.int32, device=device)
    cu_seqlens, _ = torch.sort(cu_seqlens)
    cu_seqlens[0] = 0
    cu_seqlens[-1] = T
    # (seq_idx, chunk_idx) pairs consumed by the kernel in varlen mode.
    # NOTE(review): random pairs are valid indices but not a faithful chunk
    # map — fine for a smoke/dump test, not for numerical comparison.
    chunk_indices = torch.randint(low=0, high=B, size=(2 * T,), dtype=torch.int32, device=device)

    # Launch grid: (T-blocks, sub-chunks per block, heads).
    # NOTE(review): axis 2 covers H programs only; the kernel decodes
    # i_bh // H as a batch index — in varlen mode that index is unused.
    num_blocks_t = triton.cdiv(T, BT)
    num_blocks_i = triton.cdiv(T, BC)
    num_blocks_h = H
    grid = (num_blocks_t, num_blocks_i, num_blocks_h)

    # Exercise the variable-length code path.
    IS_VARLEN = True

    # Fast libdevice exp is opt-in via environment variable.
    USE_FAST_OPS = os.environ.get('FLA_USE_FAST_OPS', '0') == '1'
    if USE_FAST_OPS:
        # Bug fix: the original message claimed the flag was 0 inside the
        # branch taken when it is 1.
        print("FLA_USE_FAST_OPS is 1\n")

    chunk_scaled_dot_kkt_fwd_kernel_intra_sub_intra[grid](
        k, g, beta, A,
        cu_seqlens, chunk_indices, T,
        H=H, K=K, BT=BT, BC=BC, BK=BK,
        IS_VARLEN=IS_VARLEN,
        USE_FAST_OPS=USE_FAST_OPS
    )

    # Flatten to 2-D and dump as text for offline comparison.
    a_numpy = A.cpu().detach().numpy()
    np.savetxt(output_file, a_numpy.reshape(-1, a_numpy.shape[-1]))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='test_chunk_scaled_dot_kkt_fwd_kernel_intra_sub_intra')
    parser.add_argument('--output', type=str, default='default_output.txt', 
                        help='Output file name (default: default_output.txt)')
    args = parser.parse_args()
    test_chunk_scaled_dot_kkt_fwd_kernel_intra_sub_intra(args.output)

