import os
import itertools
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice
import argparse
import numpy as np

# When set, every program instance is replayed serially on a (1, 1, ...) grid,
# which makes kernel execution deterministic for precision debugging on NPU.
FLA_SINGLE_THREAD = os.getenv("TRITON_PRECISION_NPU_SINGLE_THREAD", '0') == '1'

def single_wrapper(grid, grid_size, kernel):
    """Launch *kernel* once normally, or replay it once per grid point.

    Args:
        grid: launch grid, a tuple of per-dimension extents.
        grid_size: number of grid dimensions (PID0..PID{grid_size-1} are filled).
        kernel: callable taking (grid, pid_kwargs) and performing the launch.
    """
    if not FLA_SINGLE_THREAD:
        # Normal path: one launch over the full grid; PIDs come from hardware,
        # so USE_PIDS is False and the PID overrides are dummies.
        pid_kwargs = {'USE_PIDS': False}
        pid_kwargs.update({f"PID{dim}": 0 for dim in range(grid_size)})
        kernel(grid, pid_kwargs)
        return

    assert isinstance(grid, tuple)

    # Serial path: enumerate every grid coordinate and launch a degenerate
    # (1, 1, ...) grid with the coordinate injected via PID0/PID1/... kwargs.
    degenerate_grid = (1,) * grid_size
    for pids in itertools.product(*(range(extent) for extent in grid)):
        pid_kwargs = {'USE_PIDS': True}
        pid_kwargs.update({f"PID{axis}": pid for axis, pid in enumerate(pids)})
        kernel(degenerate_grid, pid_kwargs)

# Select the math primitives used inside kernels: FLA_USE_FAST_OPS=1 picks the
# libdevice fast (approximate) variants, otherwise the default triton builtins.
if os.environ.get('FLA_USE_FAST_OPS', '0') == '1':
    exp, exp2 = tldevice.fast_expf, tldevice.exp2
    log, log2 = tldevice.fast_logf, tldevice.fast_log2f
else:
    exp, exp2 = tl.exp, tl.math.exp2
    log, log2 = tl.log, tl.log2

@triton.jit
def compute_cycle(T, i_t, A, H, BT, o_i, offset, b_A):
    # Sequential forward-substitution sweep over the rows of the current 16x16
    # tile b_A: each row i is recomputed from the rows already finalized in b_A.
    # Returns the updated tile — triton values are immutable, so the caller
    # must rebind the result (b_A = compute_cycle(...)).
    # NOTE(review): the loop starts at 2, not 1 — presumably rows 0 and 1 are
    # already correct after the caller's initialisation; confirm against the
    # reference solve_tril implementation.
    # The min(16, T - i_t * 16) bound guards the final, possibly partial tile.
    for i in range(2, min(16, T - i_t * 16)):
        # Load (negated) row i of the source matrix for this head/tile.
        b_a = -tl.load(A + (i_t * 16 + i) * H*BT + o_i + offset)
        # Add the contribution of previously solved rows: b_a += b_a @ b_A.
        b_a = b_a + tl.sum(b_a[:, None] * b_A, 0)
        # Splice the solved row back into position i of the tile.
        b_A = tl.where((o_i == i)[:, None], b_a, b_A)
    return b_A

@triton.jit(do_not_specialize=['T', 'PID0', 'PID1'])
def solve_tril_16x16_kernel(
    A,
    Ai,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    BT: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    DOT_PRECISION: tl.constexpr,
    B: tl.constexpr,
    USE_PIDS: tl.constexpr,
    PID0,
    PID1
):
    # Invert one 16x16 unit-lower-triangular tile of A into Ai.
    # Grid axis 0 walks 16-row time blocks, axis 1 walks batch*head pairs.
    # With USE_PIDS the program ids are overridden by PID0/PID1 (single-thread
    # precision-replay mode, see single_wrapper).
    if USE_PIDS:
        i_t, i_bh = PID0, PID1
    else:
        i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Variable-length path: remap (block, batch) via chunk_indices and
        # recover the sequence's [bos, eos) span from cu_seqlens.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    o_i = tl.arange(0, 16)
    m_A = o_i[:, None] > o_i[None, :]   # strictly-lower-triangular mask
    m_I = o_i[:, None] == o_i[None, :]  # identity mask (unit diagonal)

    # Advance base pointers to this sequence/head.
    A = A + (bos*H + i_h) * BT
    Ai = Ai + (bos*H + i_h) * 16

    # Column offset of this 16-wide tile inside the BT-wide block of A.
    offset = (i_t * 16) % BT
    p_A = tl.make_block_ptr(A, (T, BT), (H*BT, 1), (i_t * 16, offset), (16, 16), (1, 0))
    b_A = tl.load(p_A, boundary_check=(0, 1)).to(tl.float32)
    # Seed with the negated strictly-lower part; the diagonal is added below.
    b_A = -tl.where(m_A, b_A, 0)

    # BUG FIX: the updated tile returned by compute_cycle was previously
    # discarded (triton values are immutable), so rows >= 2 never received
    # their solved values. Rebind the result.
    b_A = compute_cycle(T, i_t, A, H, BT, o_i, offset, b_A)
    b_A += m_I

    p_Ai = tl.make_block_ptr(Ai, (T, 16), (H*16, 1), (i_t * 16, 0), (16, 16), (1, 0))
    tl.store(p_Ai, b_A.to(p_Ai.dtype.element_ty, fp_downcast_rounding="rtne"), boundary_check=(0, 1))

def test_solve_tril_16x16_kernel(output_file):
    """Run solve_tril_16x16_kernel on synthetic data and dump Ai to a file.

    Args:
        output_file: path of the text file receiving the flattened Ai values.
    """
    torch.manual_seed(42)

    B = 2        # batch size (forwarded to the kernel for bounds guarding)
    T = 64       # sequence length
    H = 8        # number of heads
    BT = 16      # block size along T
    device = 'npu'  # original device selection
    dtype = torch.float16

    # Input tiles and output buffer for the inverses.
    A = torch.randn(B, T, H, BT, dtype=dtype, device=device)
    Ai = torch.zeros(B, T, H, 16, dtype=dtype, device=device)

    # Cumulative sequence lengths with pinned endpoints 0 and T.
    cu_seqlens = torch.randint(low=0, high=T, size=(B + 1,), dtype=torch.int32, device=device)
    cu_seqlens[0] = 0
    cu_seqlens[-1] = T

    # Flat (i_n, i_t) pairs: sequence index (only 0/1) and in-sequence block.
    index_pairs = []
    for t_global in range(T):
        index_pairs.extend([0 if t_global < 32 else 1, t_global % 32])
    chunk_indices = torch.tensor(index_pairs, dtype=torch.int32, device=device)

    # Grid: (time blocks, batch*heads). Second axis was H originally;
    # corrected to B*H.
    grid = (triton.cdiv(T, 16), B * H)

    IS_VARLEN = True
    DOT_PRECISION = "ieee"

    def launch(cur_grid, pid_args):
        solve_tril_16x16_kernel[cur_grid](
            A, Ai,
            cu_seqlens, chunk_indices, T,
            H=H, BT=BT,
            IS_VARLEN=IS_VARLEN,
            DOT_PRECISION=DOT_PRECISION,
            B=B,
            **pid_args
        )

    single_wrapper(grid, 2, launch)

    np.savetxt(output_file, Ai.cpu().detach().numpy().flatten(), fmt="%.10f")
    print(f"Output saved to {output_file} (TMA enabled: False)")

if __name__ == "__main__":
    # CLI entry point: only knob is the output file path.
    cli = argparse.ArgumentParser(description='test_solve_tril_16x16_kernel')
    cli.add_argument('--output', type=str, default='default_output.txt',
                     help='Output file name (default: default_output.txt)')
    cli_args = cli.parse_args()
    test_solve_tril_16x16_kernel(cli_args.output)


