import os
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice
import argparse
import numpy as np

def setup_global_tma_descriptor():
    """Pick the best tensor-descriptor factory the installed Triton offers.

    Probes ``triton.language`` for the experimental and then the stable
    ``make_tensor_descriptor`` entry points; if neither exists, returns a
    JIT-compiled no-op stand-in. Callers detect the fallback by its
    ``__name__`` (``'dummy_make_tensor_descriptor'``), so that name must
    not change.
    """
    for candidate in ('_experimental_make_tensor_descriptor',
                      'make_tensor_descriptor'):
        if hasattr(triton.language, candidate):
            return getattr(triton.language, candidate)

    # Neither API is present: supply a descriptor factory that yields None.
    @triton.jit
    def dummy_make_tensor_descriptor(base, shape, strides, block_shape, _builder=None):
        return None

    return dummy_make_tensor_descriptor

# Resolve the TMA descriptor factory once at import time so the kernel below
# can reference a single module-level name regardless of Triton version.
make_tensor_descriptor = setup_global_tma_descriptor()

# Select math primitives at import time: when FLA_USE_FAST_OPS=1, use the
# faster (lower-precision) libdevice variants; otherwise use the standard
# Triton ops.
if os.environ.get('FLA_USE_FAST_OPS', '0') == '1':
    exp = tldevice.fast_expf
    exp2 = tldevice.exp2  # NOTE(review): not a fast_* variant, unlike its siblings — confirm intentional
    log = tldevice.fast_logf
    log2 = tldevice.fast_log2f
else:
    exp = tl.exp
    exp2 = tl.math.exp2
    log = tl.log
    log2 = tl.log2

# One program per (16-row chunk, batch*head): takes the 16x16 diagonal tile of
# A at that chunk, treats it as unit-lower-triangular, and writes its inverse
# (computed by row-wise forward substitution) into Ai.
@triton.heuristics({
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
    # NOTE(review): this heuristic just forwards the caller-supplied B; it looks
    # redundant with the explicit constexpr parameter — confirm why it is needed.
    'B': lambda args: args['B']
})
@triton.jit(do_not_specialize=['T'])
def solve_tril_16x16_kernel(
    A,               # input pointer, addressed as (T, BT) rows of stride H*BT per (b, h)
    Ai,              # output pointer, addressed as (T, 16) rows of stride H*16 per (b, h)
    cu_seqlens,      # varlen mode: cumulative sequence lengths; None => fixed-length mode
    chunk_indices,   # varlen mode: flat (seq id, chunk-in-seq) int pairs, one per program on axis 0
    T,               # sequence length (replaced per-sequence in varlen mode)
    H: tl.constexpr,             # number of heads
    BT: tl.constexpr,            # width (columns) of one A tile row
    USE_TMA: tl.constexpr,       # load/store via TMA tensor descriptors instead of block pointers
    IS_VARLEN: tl.constexpr,     # derived by the heuristic above from cu_seqlens
    DOT_PRECISION: tl.constexpr, # NOTE(review): unused in this kernel body
    B: tl.constexpr              # batch size
):
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Remap the flat chunk id to (sequence index, chunk within sequence),
        # then recover this sequence's [bos, eos) range and its true length T.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    o_i = tl.arange(0, 16)
    # Masks over the 16x16 tile: strictly-lower-triangular part and diagonal.
    m_A = o_i[:, None] > o_i[None, :]
    m_I = o_i[:, None] == o_i[None, :]

    # Advance the base pointers to this (sequence, head)'s first row.
    A = A + (bos*H + i_h) * BT
    Ai = Ai + (bos*H + i_h) * 16

    # Column offset of this chunk's 16x16 tile inside the BT-wide A block.
    offset = (i_t * 16) % BT
    if not USE_TMA:
        p_A = tl.make_block_ptr(A, (T, BT), (H*BT, 1), (i_t * 16, offset), (16, 16), (1, 0))
        b_A = tl.load(p_A, boundary_check=(0, 1)).to(tl.float32)
    else:
        desc = make_tensor_descriptor(A, [T, BT], [H*BT, 1], [16, 16])
        desc_o = make_tensor_descriptor(Ai, [T, 16], [H*16, 1], [16, 16])
        b_A = desc.load([i_t * 16, offset]).to(tl.float32)
    # Keep only the strictly-lower part, negated — the starting point for
    # inverting a unit-lower-triangular matrix.
    b_A = -tl.where(m_A, b_A, 0)

    # Forward substitution. The loop starts at row 2: rows 0 and 1 need no
    # substitution step after the negation above. The min(...) clamp skips
    # rows that fall past the end of the sequence (loop is empty if the
    # chunk starts beyond T).
    for i in range(2, min(16, T - i_t * 16)):
        b_a = -tl.load(A + (i_t * 16 + i) * H*BT + o_i + offset)
        # Accumulate contributions of the already-solved rows above row i.
        b_a = b_a + tl.sum(b_a[:, None] * b_A, 0)
        b_A = tl.where((o_i == i)[:, None], b_a, b_A)
    # Place the unit diagonal.
    b_A += m_I

    if not USE_TMA:
        p_Ai = tl.make_block_ptr(Ai, (T, 16), (H*16, 1), (i_t * 16, 0), (16, 16), (1, 0))
        tl.store(p_Ai, b_A.to(p_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), boundary_check=(0, 1))
    else:
        desc_o.store([i_t * 16, 0], b_A.to(desc_o.dtype, fp_downcast_rounding="rtne"))

def test_solve_tril_16x16_kernel(output_file):
    """Launch solve_tril_16x16_kernel on seeded random data and dump Ai.

    Args:
        output_file: path the flattened result tensor is written to as text.
    """
    torch.manual_seed(42)

    batch = 2        # batch size (forwarded to the kernel for bounds guarding)
    seq_len = 64     # sequence length
    num_heads = 8    # number of heads
    block_t = 16     # block size along T
    device = 'npu'   # original device selection
    dtype = torch.float16

    # Keep tensor creation in this exact order so RNG consumption matches.
    A = torch.randn(batch, seq_len, num_heads, block_t, dtype=dtype, device=device)
    Ai = torch.zeros(batch, seq_len, num_heads, 16, dtype=dtype, device=device)
    cu_seqlens = torch.randint(low=0, high=seq_len, size=(batch + 1,), dtype=torch.int32, device=device)
    cu_seqlens[0] = 0
    cu_seqlens[-1] = seq_len

    # One (seq id, local position) pair per global position: sequence 0 for
    # the first half, sequence 1 for the second (i_n is only 0/1).
    pairs = []
    for pos in range(seq_len):
        pairs.append(0 if pos < 32 else 1)
        pairs.append(pos % 32)  # i_t within the sequence
    chunk_indices = torch.tensor(pairs, dtype=torch.int32, device=device)

    # Axis 1 was originally H; corrected to B*H.
    grid = (triton.cdiv(seq_len, 16), batch * num_heads)

    interpreting = os.environ.get('TRITON_INTERPRET', '0') == '1'
    # TMA is usable only when the resolved factory is a real implementation
    # (not the dummy fallback) and we are not running under the interpreter.
    has_tma = make_tensor_descriptor.__name__ != 'dummy_make_tensor_descriptor'
    USE_TMA = has_tma and not interpreting

    solve_tril_16x16_kernel[grid](
        A, Ai,
        cu_seqlens, chunk_indices, seq_len,
        H=num_heads, BT=block_t,
        USE_TMA=USE_TMA, IS_VARLEN=True,
        DOT_PRECISION="ieee",
        B=batch
    )

    np.savetxt(output_file, Ai.cpu().detach().numpy().flatten(), fmt="%.10f")
    print(f"Output saved to {output_file} (TMA enabled: {USE_TMA})")

if __name__ == "__main__":
    # CLI entry point: only knob is the destination file for the dumped output.
    cli = argparse.ArgumentParser(description='test_solve_tril_16x16_kernel')
    cli.add_argument(
        '--output',
        type=str,
        default='default_output.txt',
        help='Output file name (default: default_output.txt)',
    )
    parsed = cli.parse_args()
    test_solve_tril_16x16_kernel(parsed.output)


