import argparse
import itertools
import os
import numpy as np
import torch
import triton
import triton.language as tl
import triton.language.extra.libdevice as tldevice

FLA_SINGLE_THREAD = os.getenv("TRITON_PRECISION_NPU_SINGLE_THREAD", '0') == '1'

def single_wrapper(grid, grid_size, kernel):
    """Dispatch a Triton kernel either normally or one program at a time.

    When the ``TRITON_PRECISION_NPU_SINGLE_THREAD`` env flag is off, the
    kernel is launched once over the full ``grid`` with ``USE_PIDS=False``.
    When it is on, every grid coordinate is enumerated on the host and the
    kernel is launched on a degenerate (1, ..., 1) grid with the program ids
    passed explicitly via ``PID0..PID{grid_size-1}``.

    Args:
        grid: launch grid — a tuple of extents (required in single-thread
            mode) or a Triton grid callable (multi-thread mode only).
        grid_size: number of grid dimensions, i.e. how many PID kwargs to set.
        kernel: callable taking ``(grid, extra_kwargs)`` that performs the
            actual Triton launch.
    """
    if not FLA_SINGLE_THREAD:
        launch_args = {'USE_PIDS': False}
        launch_args.update({f"PID{axis}": 0 for axis in range(grid_size)})
        kernel(grid, launch_args)
        return

    # Single-thread mode: the grid must be a concrete tuple we can enumerate.
    assert isinstance(grid, tuple)
    degenerate_grid = (1,) * grid_size

    for pids in itertools.product(*(range(extent) for extent in grid)):
        launch_args = {'USE_PIDS': True}
        for axis, pid in enumerate(pids):
            launch_args[f"PID{axis}"] = pid
        kernel(degenerate_grid, launch_args)

# Select exp/log implementations once at import time: FLA_USE_FAST_OPS=1 opts
# into the faster, lower-precision libdevice variants, otherwise the default
# Triton math ops are used.
# NOTE(review): none of these aliases are referenced by the kernel visible in
# this file — presumably kept for parity with related FLA kernels; confirm
# before removing.
if os.environ.get('FLA_USE_FAST_OPS', '0') == '1':
    exp = tldevice.fast_expf
    exp2 = tldevice.exp2
    log = tldevice.fast_logf
    log2 = tldevice.fast_log2f
else:
    exp = tl.exp
    exp2 = tl.math.exp2
    log = tl.log
    log2 = tl.log2

@triton.heuristics({
    # Compile-time feature flags derived from which optional tensors were
    # actually passed to the launch.
    'USE_INITIAL_STATE': lambda args: args['cache'] is not None,
    'HAS_WEIGHT': lambda args: args['weight'] is not None,
    'HAS_BIAS': lambda args: args['bias'] is not None,
    'HAS_RESIDUAL': lambda args: args['residual'] is not None,
})
@triton.jit(do_not_specialize=['PID0', 'PID1'])
def causal_conv1d_update_kernel(
    x,
    cache,
    residual,
    y,
    weight,
    bias,
    D: tl.constexpr,
    W: tl.constexpr,
    BD: tl.constexpr,
    BW: tl.constexpr,
    ACTIVATION: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr,
    HAS_WEIGHT: tl.constexpr,
    HAS_BIAS: tl.constexpr,
    HAS_RESIDUAL: tl.constexpr,
    USE_PIDS: tl.constexpr = False,
    PID0=0,
    PID1=0
):
    """Single-step causal depthwise conv1d update.

    For one new time step, shifts the per-sample rolling window ``cache``
    left by one tap, inserts the fresh input ``x``, takes the per-channel
    weighted sum over the last ``BW`` taps, applies optional bias /
    activation / residual, stores the result in ``y``, and writes the
    shifted window back into ``cache`` in-place.

    Layout assumptions (inferred from the index arithmetic — confirm against
    callers): x, residual, y are addressed as flat [N, D] matrices where N is
    the second grid axis; cache is [N, D, W] (W contiguous); weight is [D, W].

    Grid: (ceil(D / BD), N). D/W are the feature dim and window size; BD/BW
    the corresponding block sizes. When USE_PIDS is set, program ids come from
    PID0/PID1 instead of the launch grid (single-threaded debug mode driven
    by single_wrapper).
    """
    if USE_PIDS:
        i_d, i_n, = PID0, PID1
    else:
        i_d, i_n = tl.program_id(0), tl.program_id(1)

    # Channel offsets handled by this program, and the trailing BW-wide slice
    # of the W-sized window (only the last BW taps participate).
    o_d = i_d * BD + tl.arange(0, BD)
    o_w = tl.arange(0, BW) + W - BW
    m_d = o_d < D       # valid channels
    m_w = o_w >= 0      # valid window taps (guards BW > W)
    m_c = o_w < W - 1   # every window slot except the last one

    # [BD]  fresh input sample for this (channel block, row).
    b_x = tl.load(x + i_n * D + o_d, mask=m_d, other=0).to(tl.float32)

    if USE_INITIAL_STATE:
        # shift the cache by 1 with the last one being discarded
        # Reading from column offset W - BW + 1 realizes the left-shift; the
        # out-of-range last column is handled by boundary_check.
        p_cache = tl.make_block_ptr(cache + i_n * D*W, (D, W), (W, 1), (i_d * BD, W - BW + 1), (BD, BW), (1, 0))
        # [BD, BW]
        b_cache = tl.load(p_cache, boundary_check=(0, 1)).to(tl.float32)
        # Drop the fresh sample b_x into the (now vacant) last window slot.
        b_cache = tl.where(m_c[None, :], b_cache, b_x[:, None])
    else:
        # NOTE(review): with no cache the window is all zeros and b_x is never
        # incorporated into the output — confirm this path is intended.
        b_cache = tl.zeros((BD, BW), dtype=tl.float32)

    if HAS_WEIGHT:
        # Depthwise weighted sum over the window: [BD, BW] * [BD, BW] -> [BD].
        b_w = tl.load(weight + o_d[:, None] * W + o_w, mask=m_d[:, None] & m_w, other=0)
        b_y = tl.sum(b_cache * b_w, 1)
    else:
        b_y = tl.sum(b_cache, 1)
    if HAS_BIAS:
        b_y += tl.load(bias + o_d, mask=m_d)

    if ACTIVATION == 'swish' or ACTIVATION == 'silu':
        b_y = b_y * tl.sigmoid(b_y)

    if HAS_RESIDUAL:
        b_y += tl.load(residual + i_n * D + o_d, mask=m_d, other=0)

    # Round-to-nearest-even downcast to the output element type.
    tl.store(y + i_n * D + o_d, tl.cast(b_y, dtype=y.dtype.element_ty, fp_downcast_rounding='rtne'), mask=m_d)

    if USE_INITIAL_STATE:
        b_cache = tl.cast(b_cache, dtype=cache.dtype.element_ty, fp_downcast_rounding='rtne')
        # update the cache in-place
        p_cache = tl.make_block_ptr(cache + i_n * D*W, (D, W), (W, 1), (i_d * BD, W - BW), (BD, BW), (1, 0))
        tl.store(p_cache, b_cache, boundary_check=(0, 1))

def test_causal_conv1d_update_kernel(output_file):
    """Run causal_conv1d_update_kernel on seeded random NPU data and dump y.

    Builds random inputs, launches the kernel (either through the normal
    Triton grid or per-program via single_wrapper when single-thread mode is
    enabled) and saves the resulting ``y`` tensor to ``output_file`` as text.

    Args:
        output_file: path passed to ``numpy.savetxt``.
    """
    torch.manual_seed(42)

    # 1. Problem sizes
    B = 2        # batch size
    T = 1        # sequence length
    D = 32       # feature dimension
    W = 8        # convolution window size
    BD = 8       # block size along D
    BW = 4       # block size along W

    # 2. Input tensors
    device = 'npu'
    dtype = torch.float32
    x = torch.randn(B, T, D, dtype=dtype, device=device)
    cache = torch.randn(B, D, W, dtype=dtype, device=device)  # shape [B, D, W]
    residual = torch.randn(B, T, D, dtype=dtype, device=device)
    y = torch.randn(B, T, D, dtype=dtype, device=device)
    weight = torch.randn(D, W, dtype=dtype, device=device)
    bias = torch.randn(D, dtype=dtype, device=device)

    # 3. Feature flags
    USE_INITIAL_STATE = True
    HAS_WEIGHT = True
    HAS_BIAS = True
    HAS_RESIDUAL = True
    ACTIVATION = 'silu'

    if not USE_INITIAL_STATE:
        cache = None

    # The kernel addresses x/y/residual as a flat [B*T, D] matrix through
    # program_id(1), so the second grid axis must cover all B*T rows.
    # (Previously this used T, which left batches 1..B-1 uncomputed and the
    # corresponding rows of y holding their random initialization.)
    N = B * T

    if FLA_SINGLE_THREAD:
        # One channel block covering the whole feature dim in debug mode.
        BD = 32
        grid = (triton.cdiv(D, BD), N)
    else:
        print("WARNING: in multithreaded NPU run mode given code contains cache-access data races, so results could be slightly different from interpreter")
        def grid(meta): return (triton.cdiv(D, meta['BD']), N)

    def kernel(cur_grid, args):
        # Actual Triton launch; `args` carries USE_PIDS/PID0/PID1 from
        # single_wrapper.
        causal_conv1d_update_kernel[cur_grid](
            x=x,
            cache=cache,
            residual=residual,
            y=y,
            weight=weight,
            bias=bias,
            D=D,
            W=W,
            BD=BD,
            BW=BW,
            USE_INITIAL_STATE=USE_INITIAL_STATE,
            HAS_WEIGHT=HAS_WEIGHT,
            HAS_BIAS=HAS_BIAS,
            HAS_RESIDUAL=HAS_RESIDUAL,
            ACTIVATION=ACTIVATION,
            **args
        )
    single_wrapper(grid, 2, kernel)

    # 4. Save y as a 2-D text table of shape [B*T, D].
    y = y.view(B, T, D)
    y_numpy = y.cpu().detach().numpy()
    np.savetxt(output_file, y_numpy.reshape(-1, y_numpy.shape[-1]))

if __name__ == "__main__":
    # CLI entry point: run the kernel test and write results to the chosen file.
    cli = argparse.ArgumentParser(description='Test Causal Conv1D Update Kernel')
    cli.add_argument(
        '--output',
        type=str,
        default='default_output.txt',
        help='Output file name (default: default_output.txt)',
    )
    test_causal_conv1d_update_kernel(cli.parse_args().output)
