import torch
import torch_npu
import triton
import triton.language as tl

# 192 KiB — NOTE(review): presumably the on-chip Unified Buffer capacity of
# the target Ascend NPU (cf. the torch_npu import); currently unreferenced in
# this file — confirm before removing.
UB_BYTES = 1024 * 192

# Intra-block ("diagonal") stage of the lightning-attention forward pass.
# For each query row it accumulates the causal, decay-weighted attention
# restricted to keys/values within the SAME INTER_BLOCK and writes that
# partial result to Out; the inter-block contribution is added afterwards
# by _fwd_none_diag_kernel. The kernel is launched with a small 1-D grid
# and distributes a flat per-(batch, head) task list round-robin over the
# available programs.
@triton.jit
def _fwd_diag_kernel(
        Q,   # (b, h, n, d) queries, contiguous
        K,   # (b, h, n, d) keys, contiguous
        V,   # (b, h, n, e) values, contiguous
        Out,   # (b, h, n, e) output buffer (diagonal part only)
        S,   # per-head decay rate, one scalar per head
        #
        b: tl.constexpr, 
        h: tl.constexpr, 
        n: tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        #
        INTER_BLOCK: tl.constexpr,    # outer sequence-block length, e.g. 256
        BLOCK_SIZE_N: tl.constexpr,     # inner tile length along n, e.g. 32
        BLOCK_SIZE_D: tl.constexpr,   # tile size along d
        BLOCK_SIZE_E: tl.constexpr,   # tile size along e
    ):

    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head): one per (inter block, inner tile, e tile) triple
    block_num_per_bh = inter_block_num * inner_block_num * block_num_e

    # start_program_idx rotates which program owns task 0 of each (b, h) so
    # that the leftover tasks (block_num_per_bh % program_num) are spread
    # evenly across programs instead of always hitting the same ones.
    start_program_idx = 0
    start_task_idx = 0
    for bh_idx in tl.range(0, bh_num): 

        h_idx = bh_idx % h
        S_block_idx = S + h_idx
        s = tl.load(S_block_idx).to(cal_type)

        # flat element offsets of this (batch, head) slice
        qk_offs = bh_idx * n * d
        vo_offs = bh_idx * n * e

        # first task index this program handles for the current (b, h)
        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num
        
        for task_idx in tl.range(start_task_idx, block_num_per_bh, program_num): 

            # decode the flat task id into (inter block, inner tile, e tile)
            block_idx_e = task_idx % block_num_e
            inner_block_total_idx = task_idx // block_num_e
            inter_block_idx = inner_block_total_idx // inner_block_num
            inner_block_idx = inner_block_total_idx % inner_block_num
            
            # inter block offsets
            inter_block_offs = inter_block_idx * INTER_BLOCK
            # exclusive n-bound of this inter block, clipped to seq length
            inter_block_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)

            # inner block offsets
            inner_block_offs = inner_block_idx * BLOCK_SIZE_N

            # ptr base
            qo_n_index = inter_block_offs + inner_block_offs + tl.arange(0, BLOCK_SIZE_N)
            d_index = tl.arange(0, BLOCK_SIZE_D)
            e_index = tl.arange(0, BLOCK_SIZE_E) + block_idx_e * BLOCK_SIZE_E

            q_2d_index_base = qo_n_index[:, None] * d + d_index[None, :]
            o_2d_index = qo_n_index[:, None] * e + e_index[None, :]

            Q_block_ptr_base = Q + qk_offs + q_2d_index_base
            O_block_ptr = Out + vo_offs + o_2d_index

            # accumulate over every key tile up to and including the query tile
            qkv = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_E], dtype = cal_type)
            for i in tl.range(1 + inner_block_idx): 
                
                # ptr base
                kv_n_index = inter_block_offs + i * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
                
                k_2d_index_base = kv_n_index[:, None] * d + d_index[None, :]
                v_2d_index = kv_n_index[:, None] * e + e_index[None, :]

                K_block_ptr_base = K + qk_offs + k_2d_index_base
                V_block_ptr = V + vo_offs + v_2d_index

                # causal mask + decay:
                # decay[i, j] = exp(-s * (i - j)) when i >= j, else 0
                diff = qo_n_index[:, None] - kv_n_index[None, :]
                s_index = s * diff
                s_index = tl.where(diff >= 0, -s_index, float("-inf"))
                decay = tl.exp(s_index)

                # q @ k^T, accumulated over d tiles
                qk = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_N], dtype = cal_type)
                for d_idx in tl.range(block_num_d): 

                    Q_block_ptr = Q_block_ptr_base + d_idx * BLOCK_SIZE_D
                    K_block_ptr = K_block_ptr_base + d_idx * BLOCK_SIZE_D

                    d_index_move = d_index + d_idx * BLOCK_SIZE_D
                    # mask_q = qo_n_index[:, None] < n and d_index_move[None, :] < d
                    # mask_k = kv_n_index[:, None] < n and d_index_move[None, :] < d
                    mask_q = qo_n_index[:, None] < inter_block_bound and d_index_move[None, :] < d
                    mask_k = kv_n_index[:, None] < inter_block_bound and d_index_move[None, :] < d
                    
                    q = tl.load(Q_block_ptr, mask = mask_q, other = 0.0).to(cal_type)
                    k = tl.load(K_block_ptr, mask = mask_k, other = 0.0).to(cal_type)
                    k_trans = tl.trans(k)
                    qk += tl.dot(q, k_trans)
                
                qk = qk * decay

                # mask_v = kv_n_index[:, None] < n and e_index[None, :] < e
                mask_v = kv_n_index[:, None] < inter_block_bound and e_index[None, :] < e
                v = tl.load(V_block_ptr, mask = mask_v, other = 0.0).to(cal_type)
                qkv += tl.dot(qk, v)
            
            # mask_o = qo_n_index[:, None] < n and e_index[None, :] < e
            mask_o = qo_n_index[:, None] < inter_block_bound and e_index[None, :] < e
            tl.store(O_block_ptr, qkv.to(out_type), mask = mask_o)

        # rotate task-0 ownership for the next (batch, head)
        start_program_idx = (start_program_idx + block_num_per_bh) % program_num
        

        
    


# @triton.jit
# def _fwd_diag_kernel_old(
#         Q, 
#         K, 
#         V, 
#         Out, 
#         S, 
#         #
#         b: tl.constexpr, 
#         h: tl.constexpr, 
#         n: tl.constexpr,
#         d: tl.constexpr, 
#         e: tl.constexpr, 
#         #
#         BLOCK: tl.constexpr,    # 256
#         NUM_BLOCK: tl.constexpr,    # tl.cdiv(n, BLOCK)
#         CBLOCK: tl.constexpr,     # 32
#         # BLOCK_SIZE_K: tl.constexpr, 
#     ):
#     cal_type = tl.float32
#     out_type = Out.dtype.element_ty

#     # grid = (b * h * NUM_BLOCK, NUM_CBLOCK)
#     off = tl.program_id(0)
#     # [0, b * h] 中第几个 h
#     off_bh = off // NUM_BLOCK
#     # 当前 h 内第几个BLOCK
#     off_block = off % NUM_BLOCK
#     # 当前 block 内第几个CBLOCK
#     off_cblock = tl.program_id(1)
#     # 当前 b 中的[0, h) 中第几个h
#     off_h = off_bh % h 

#     # 定位当前 b 和 h 的 QK 起始偏移
#     qk_offset = off_bh * n * d
#     # 定位当前 b 和 h 的 V和O 起始偏移
#     v_offset = off_bh * n * e
#     o_offset = off_bh * n * e

#     # 在一个[n, d]和[n, e]内的BLOCK偏移
#     block_offset = off_block * BLOCK
#     qk_block_offset = block_offset * d
#     v_block_offset = block_offset * e
#     o_block_offset = block_offset * e
#     # CBOCK偏移
#     cblock_offset = off_cblock * CBLOCK
#     q_cblock_offset = cblock_offset * d
#     o_cblock_offset = cblock_offset * e

#     Q_block_ptr = (Q + qk_offset + qk_block_offset + q_cblock_offset +
#                    tl.arange(0, CBLOCK)[:, None] * d +
#                    tl.arange(0, d)[None, :])
#     K_block_ptr = (K + qk_offset + qk_block_offset +
#                     tl.arange(0, CBLOCK)[:, None] * d +
#                     tl.arange(0, d)[None, :])
#     V_block_ptr = (V + v_offset + v_block_offset +
#                    tl.arange(0, CBLOCK)[:, None] * e +
#                    tl.arange(0, e)[None, :])
#     O_block_ptr = (Out + o_offset + o_block_offset + o_cblock_offset +
#                    tl.arange(0, CBLOCK)[:, None] * e +
#                    tl.arange(0, e)[None, :])

#     # 当前 head 衰减率，一个head 对应 1个值
#     S_block_ptr = S + off_h
#     s = tl.load(S_block_ptr).to(cal_type)

#     i = off_cblock
#     # 当前 CBLOCK 的偏移
#     q_index = tl.arange(0, CBLOCK) + i * CBLOCK  # [i * CBOCK, (i + 1) * CBLOCK)
 
#     # mask 是n方向上的，输入的mask 的 shape 只有 (CBLOCK, 1)，但是load的时候会广播，结果为按行掩码
#     q = tl.load(Q_block_ptr,
#                 mask=block_offset + q_index[:, None] < n,
#                 other=0.0).to(cal_type)

#     qkv = tl.zeros([CBLOCK, e], dtype=cal_type)
#     # q不动，kv偏移CBLOCK
#     for j in range(i + 1):
#         kv_index = tl.arange(0, CBLOCK) + j * CBLOCK  # [j * CBLOCK, (j + 1) * CBLOCK)
#         # 掩码 + 衰减！q不能关注到未来的key信息
#         # diff = 0，则 q位置 = k位置，s_index = 0，decay = 1
#         # diff > 0，则 q位置 > k位置，s_index < 0，decay < 1
#         # diff < 0，则 q位置 < k位置，s_index = -inf，decay = 0
#         # diff.shape = s_index.shape = decay.shape = (CBLOCK, CBLOCK)
#         diff = q_index[:, None] - kv_index[None, :]
#         s_index = s * diff
#         s_index = tl.where(diff >= 0, -s_index, float("-inf"))
#         decay = tl.exp(s_index)

#         k = tl.load(
#             K_block_ptr,
#             mask=block_offset + kv_index[:, None] < n,
#             other=0.0,
#         ).to(cal_type)
#         v = tl.load(
#             V_block_ptr,
#             mask=block_offset + kv_index[:, None] < n,
#             other=0.0,
#         ).to(cal_type)

#         k_trans = tl.trans(k)
#         qk = tl.dot(q, k_trans) * decay

#         qkv += tl.dot(qk, v)

#         K_block_ptr += CBLOCK * d
#         V_block_ptr += CBLOCK * e

#     # Store the result
#     tl.store(
#         O_block_ptr,
#         qkv.to(out_type),
#         mask=block_offset + q_index[:, None] < n,
#     )

# Per-block key-value state stage: for every (batch, head, inter block) it
# computes KV[b, h, block] = sum_i k_i^T * K_decay[i] * v_i over the tokens
# of that block, i.e. the decay-weighted outer-product state contributed by
# one INTER_BLOCK. K_decay (built on the host) holds, per head, the weight
# exp(-s * (INTER_BLOCK - 1 - pos)) for each in-block position, so earlier
# tokens are decayed more. The per-(b, h, inter block) task list of
# (d tile, e tile) pairs is distributed round-robin over the 1-D grid.
@triton.jit
def _fwd_kv_parallel(
    K,  # K.shape = (b, h, n, d)
    V,  # V.shape = (b, h, n, e)
    K_decay,    # k_decay.shape = (h, BLOCK)
    KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
    b: tl.constexpr,
    h: tl.constexpr,
    n: tl.constexpr,
    d: tl.constexpr,
    e: tl.constexpr,
    INTER_BLOCK: tl.constexpr,    # outer sequence-block length, e.g. 256
    BLOCK_SIZE_N: tl.constexpr,  # inner tile length along n, e.g. 32 -> 64
    BLOCK_SIZE_D: tl.constexpr,   # tile size along d
    BLOCK_SIZE_E: tl.constexpr,   # tile size along e
):
    cal_type = tl.float32
    out_type = KV.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head, inter block): one per (d tile, e tile) pair
    block_num_per_bh_inter_block = block_num_d * block_num_e

    # start_program_idx rotates task-0 ownership between iterations so the
    # remainder tasks are spread evenly across programs.
    start_program_idx = 0
    start_task_idx = 0
    for bh_inter_block_idx in tl.range(0, bh_num * inter_block_num): 

        inter_block_idx = bh_inter_block_idx % inter_block_num
        bh_idx = bh_inter_block_idx // inter_block_num
        h_idx = bh_idx % h

        # flat element offsets of the current slices
        k_offs = bh_idx * n * d
        v_offs = bh_idx * n * e
        kv_offs = bh_inter_block_idx * d * e
        kv_n_index_base = inter_block_idx * INTER_BLOCK + tl.arange(0, BLOCK_SIZE_N)
        # exclusive n-bound of this inter block, clipped to seq length
        kv_n_index_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)

        # deal with the last inter_block: it may hold fewer than INTER_BLOCK
        # tokens, so shift the decay table right by the shortfall so that the
        # final valid token lines up with decay exponent 0.
        inter_block = INTER_BLOCK
        inner_block_actual_num = inner_block_num
        if inter_block_idx == inter_block_num - 1: 
            inter_block = n - inter_block_idx * INTER_BLOCK
            inner_block_actual_num = tl.cdiv(inter_block, BLOCK_SIZE_N)
        inter_block_shift = INTER_BLOCK - inter_block
        k_decay_offs = h_idx * INTER_BLOCK 
        k_decay_index_base = inter_block_shift + tl.arange(0, BLOCK_SIZE_N)
        k_decay_ptrs_base = K_decay + k_decay_offs + k_decay_index_base[None, :]

        # first task index this program handles for this (b, h, inter block)
        start_task_idx = program_idx - start_program_idx
        # FIXME: an earlier version mistakenly used start_program_idx here
        # instead of start_task_idx
        if start_task_idx < 0: 
            start_task_idx += program_num
        
        for task_idx in tl.range(start_task_idx, block_num_per_bh_inter_block, program_num): 
            
            # decode the flat task id into (d tile, e tile)
            block_idx_e = task_idx % block_num_e
            block_idx_d = task_idx // block_num_e

            k_d_index = block_idx_d * BLOCK_SIZE_D + tl.arange(0, BLOCK_SIZE_D)
            v_e_index = block_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)

            k_2d_index_base = kv_n_index_base[:, None] * d + k_d_index[None, :]
            v_2d_index_base = kv_n_index_base[:, None] * e + v_e_index[None, :]
            k_block_ptrs_base = K + k_offs + k_2d_index_base
            v_block_ptrs_base = V + v_offs + v_2d_index_base

            # accumulate the decayed k^T @ v state over the inner n tiles
            kv_block = tl.zeros([BLOCK_SIZE_D, BLOCK_SIZE_E], dtype = cal_type)
            for inner_block_idx in tl.range(inner_block_actual_num): 

                k_block_ptrs = k_block_ptrs_base + inner_block_idx * BLOCK_SIZE_N * d
                v_block_ptrs = v_block_ptrs_base + inner_block_idx * BLOCK_SIZE_N * e
                k_decay_ptrs = k_decay_ptrs_base + inner_block_idx * BLOCK_SIZE_N

                kv_n_index = kv_n_index_base + inner_block_idx * BLOCK_SIZE_N
                # mask_k = kv_n_index[:, None] < n and k_d_index[None, :] < d
                # mask_v = kv_n_index[:, None] < n and v_e_index[None, :] < e
                mask_k = kv_n_index[:, None] < kv_n_index_bound and k_d_index[None, :] < d
                mask_v = kv_n_index[:, None] < kv_n_index_bound and v_e_index[None, :] < e

                # the shifted decay index may run past the table on the last
                # (short) block; mask it to the table length
                k_decay_index = k_decay_index_base + inner_block_idx * BLOCK_SIZE_N
                mask_k_decay = k_decay_index[None, :] < INTER_BLOCK

                k = tl.load(k_block_ptrs, mask = mask_k, other = 0.0).to(cal_type)
                k_trans = tl.trans(k)
                v = tl.load(v_block_ptrs, mask = mask_v, other = 0.0).to(cal_type)
                k_decay = tl.load(k_decay_ptrs, mask = mask_k_decay, other = 0.0).to(cal_type)
                kv_block += tl.dot(k_trans * k_decay, v)
            
            kv_2d_index_base = k_d_index[:, None] * e + v_e_index[None, :]
            kv_block_ptrs_base = KV + kv_offs + kv_2d_index_base
            mask_kv = k_d_index[:, None] < d and v_e_index[None, :] < e
            tl.store(kv_block_ptrs_base, kv_block.to(out_type), mask = mask_kv)

        # rotate task-0 ownership for the next (b, h, inter block)
        start_program_idx = (start_program_idx + block_num_per_bh_inter_block) % program_num

    
        


# @triton.jit
# def _fwd_kv_parallel_old(
#     K,
#     V,
#     K_decay,    # k_decay.shape = (h, BLOCK)
#     KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
#     b: tl.constexpr,
#     h: tl.constexpr,
#     n: tl.constexpr,
#     d: tl.constexpr,
#     e: tl.constexpr,
#     BLOCK: tl.constexpr,    # 256
#     NUM_BLOCK,  # tl.cdiv(n, BLOCK)
#     CBLOCK: tl.constexpr,  #  32 -> 64
#     NUM_CBLOCK: tl.constexpr,   # BLOCK // CBLOCK
# ):
#     # grid = (b * h, NUM_BLOCK)

#     cal_type = tl.float32
#     output_type = KV.dtype.element_ty

#     off_bh = tl.program_id(0)
#     off_block = tl.program_id(1)

#     off_h = off_bh % h 

#     block_offset = off_block * BLOCK

#     k_block_offset = block_offset * d
#     v_block_offset = block_offset * e
#     kv_block_offset = off_block * d * e

#     k_offset = off_bh * n * d
#     v_offset = off_bh * n * e
#     kv_offset = off_bh * NUM_BLOCK * d * e

#     K_block_ptr = (K + k_offset + k_block_offset +
#                     tl.arange(0, CBLOCK)[:, None] * d +
#                     tl.arange(0, d)[None, :])
#     V_block_ptr = (V + v_offset + v_block_offset +
#                    tl.arange(0, CBLOCK)[:, None] * e +
#                    tl.arange(0, e)[None, :])
#     KV_block_ptr = (KV + kv_offset + kv_block_offset +
#                     tl.arange(0, d)[:, None] * e +
#                     tl.arange(0, e)[None, :])
#     # 从 0 - BLOCK-1 位置，对应的值的幂次分别为 BLOCK-1 到 0
#     k_decay_ptr = (K_decay + off_h * BLOCK + tl.arange(0, CBLOCK)[None, :])

    
#     kv_index = tl.arange(0, CBLOCK)
#     # 假设一个block对应的 tokens 范围是 [256, 512)
#     # 则计算 token 511 的时候，需要累加到 k256^T * v256 + k257^T * v257 + ... + k511^T * v511
#     # 这个是当前 B、H 的 一个block 需要缓存下来的无需掩码的部分的 kv_cache ，乘以1个block内不同tokens对应的不同幂次的衰减
#     kv = tl.zeros([d, e], dtype=tl.float32)

#     # n可能没有完全被 BLOCK 整除，这里计算最后一个实际 block 长度或当前非末尾block长度BLOCK
#     if off_block == NUM_BLOCK - 1:
#         split_n = n - (NUM_BLOCK - 1) * BLOCK
#     else:
#         split_n = BLOCK
#     # split_n 是尾block时可能没有完全被 CBLOCK 整除，这里计算的是 CBLOCK 与 实际的尾 cblock 长度 的差 
#     left_shift = tl.cdiv(split_n, CBLOCK) * CBLOCK - split_n
#     # split_n 实际需要分几个 CBLOCK
#     num_blocks = min(tl.cdiv(split_n, CBLOCK), NUM_CBLOCK)
#     # TODO 跳过CBLOCK？
#     k_decay_ptr += (NUM_CBLOCK - num_blocks) * CBLOCK

#     for j in range(num_blocks):
#         left_bound = (1 - j) * left_shift
#         # 依然是从小到大偏移，但是这样处理相当于在最先计算了尾 cblock 长度的 tokens量
#         # k.shape = v.shape = (CBLOCK, d) or (CBLOCK - left_shift, e)
#         k = tl.load(K_block_ptr - left_shift * d,
#                     mask=kv_index[:, None] >= left_bound,
#                     other=0.0).to(cal_type)
#         v = tl.load(V_block_ptr - left_shift * e,
#                     mask=kv_index[:, None] >= left_bound,
#                     other=0.0).to(cal_type)
#         # 为什么 k_decay 是偏移整个CBLOCK?原因在于k v 读入的时候还是读了 整个 CBLOCK，
#         # 但是向左取了left_shift的偏移，这部分偏移读入值为0，乘以 k_decay没有影响最终结果
#         k_decay = tl.load(k_decay_ptr).to(cal_type)
#         k_trans = tl.trans(k)
#         kv += tl.dot(k_trans * k_decay, v)

#         K_block_ptr += CBLOCK * d
#         V_block_ptr += CBLOCK * e
#         k_decay_ptr += CBLOCK

    
#     tl.store(KV_block_ptr, kv.to(output_type))


# Sequential (per (b, h)) exclusive prefix scan over the per-block KV states
# produced by _fwd_kv_parallel. After this kernel:
#   * KV[b, h, i] holds the accumulated, decayed state ENTERING block i
#     (history plus all blocks < i) — exactly what the non-diagonal output
#     kernel needs to multiply queries of block i with;
#   * KV_HISTORY[b, h] holds the state after all n tokens.
# The decay applied when carrying state past block i uses that block's
# actual token count (the last block may be shorter than INTER_BLOCK).
# (d tile, e tile) tasks are distributed round-robin over the 1-D grid; the
# scan over blocks is sequential inside each task.
@triton.jit
def _fwd_kv_reduce(
        S,   # per-head decay rate, one scalar per head
        KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
        KV_HISTORY,     # kv_history.shape = (b, h, d, e)
        b: tl.constexpr, 
        h: tl.constexpr, 
        n : tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        INTER_BLOCK: tl.constexpr,   # outer sequence-block length
        BLOCK_SIZE_D: tl.constexpr,   # tile size along d
        BLOCK_SIZE_E: tl.constexpr,   # tile size along e
    ):

    cal_type = tl.float32
    kv_type = KV.dtype.element_ty
    kv_history_type = KV_HISTORY.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head): one per (d tile, e tile) pair
    block_num_kv = block_num_d * block_num_e

    # start_program_idx rotates task-0 ownership between (b, h) iterations
    start_program_idx = 0
    start_task_idx = 0 
    for bh_idx in tl.range(bh_num): 

        h_idx = bh_idx % h
        s_ptr = S + h_idx
        s = tl.load(s_ptr).to(cal_type)

        # flat element offsets of this (batch, head) slice
        kv_offs = bh_idx * inter_block_num * d * e
        kv_history_offs = bh_idx * d * e

        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num

        for task_idx in tl.range(start_task_idx, block_num_kv, program_num): 

            # decode the flat task id into (d tile, e tile)
            task_idx_e = task_idx % block_num_e
            task_idx_d = task_idx // block_num_e

            index_d = task_idx_d * BLOCK_SIZE_D + tl.arange(0, BLOCK_SIZE_D)
            index_e = task_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)
            index_2d = index_d[:, None] * e + index_e[None, :]

            kv_history_ptrs = KV_HISTORY + kv_history_offs + index_2d
            kv_ptrs_base = KV + kv_offs + index_2d

            # the same (d, e) tile bound applies to every load/store below
            same_mask = index_d[:, None] < d and index_e[None, :] < e
            # running state; starts from the incoming history
            kv_pre = tl.load(kv_history_ptrs, mask = same_mask, other = 0.0).to(cal_type)

            for inter_block_idx in tl.range(inter_block_num): 

                kv_ptrs = kv_ptrs_base + inter_block_idx * d * e

                # cal decay power: actual token count of this block
                inter_block = min(n - inter_block_idx * INTER_BLOCK, INTER_BLOCK)
                inter_block_decay = tl.exp( -s * inter_block)

                kv_cur = tl.load(kv_ptrs, mask = same_mask, other = 0.0).to(cal_type)

                # overwrite KV[i] with the state entering block i (exclusive
                # scan) BEFORE folding block i into the running state
                tl.store(kv_ptrs, kv_pre.to(kv_type), mask = same_mask)

                kv_pre = inter_block_decay * kv_pre + kv_cur
            
            
            # final state after all blocks becomes the new history
            tl.store(kv_history_ptrs, kv_pre.to(kv_history_type), mask = same_mask)

        # rotate task-0 ownership for the next (batch, head)
        start_program_idx = (start_program_idx + block_num_kv) % program_num




# @triton.jit
# def _fwd_kv_reduce_old(
#         S, 
#         KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
#         KV_HISTORY,     # kv_history.shape = (b, h, d, e)
#         b: tl.constexpr, 
#         h: tl.constexpr, 
#         n : tl.constexpr,
#         d: tl.constexpr, 
#         e: tl.constexpr, 
#         BLOCK: tl.constexpr,
#         NUM_BLOCK : tl.constexpr, 
#         D_HALF: tl.constexpr, 
#     ):
#     # grid = (b * h, NUM_FBLOCK = 1)
#     cal_type = tl.float32
#     kv_type = KV.dtype.element_ty
#     kv_history_type = KV_HISTORY.dtype.element_ty
    
#     off_bh = tl.program_id(0)
#     off_h = off_bh % h 

#     kv_offset = off_bh * NUM_BLOCK * d * e
#     kv_history_offset = off_bh * d * e
#     s_ptrs = S + off_h
#     s = tl.load(s_ptrs).to(cal_type)

#     # FIXME ub overflow

#     # KV_block_ptr = (KV + kv_offset 
#     #                 + tl.arange(0, d)[:, None] * e 
#     #                 + tl.arange(0, e)[None, :])
    
#     # # 一次处理一个 b * h 的 kv_history
    
#     # KV_HISTORY_block_ptr = (KV_HISTORY + kv_history_offset +
#     #                         tl.arange(0, d)[:, None] * e +
#     #                         tl.arange(0, e)[None, :])

#     # kv_pre = tl.load(KV_HISTORY_block_ptr).to(cal_type)

#     # # 循环处理 KV 的 dim2，将 NUM_BLOCK 个 d * e 的 各个 BLOCK 对应的 kv_cache 加载出来，乘以衰减加上 kv_history
#     # for i in range(NUM_BLOCK): 
#     #     block_size = min(n - i * BLOCK, BLOCK)
#     #     block_decay = tl.exp(-s.to(tl.float32) * block_size)

#     #     kv_cur = tl.load(KV_block_ptr).to(cal_type)
#     #     # 先前的 KV[b][h][NUM_BLOCK] 只有 block 
#     #     # 最终每个 KV[b][h][i]存储的是这个 block 的
#     #     tl.store(KV_block_ptr, kv_pre.to(kv_type))

#     #     # 这里将之前的 NUM_BLOCK 的 最后一块 KV_CACHE乘以衰减然后累加，对应了 torch 中的算法实现。
#     #     kv_pre = block_decay * kv_pre + kv_cur
#     #     KV_block_ptr += d * e

#     # tl.store(KV_HISTORY_block_ptr, kv_pre.to(kv_history_type))

#     # 注意， arange's arguments must be of type tl.constexpr
#     KV_block_ptr = (KV + kv_offset 
#                     + tl.arange(0, D_HALF)[:, None] * e 
#                     + tl.arange(0, e)[None, :])
    
#     # 一次处理一个 b * h 的 kv_history
    
#     KV_HISTORY_block_ptr = (KV_HISTORY + kv_history_offset 
#                             + tl.arange(0, D_HALF)[:, None] * e 
#                             + tl.arange(0, e)[None, :])

#     kv_pre = tl.load(KV_HISTORY_block_ptr).to(cal_type)

#     # 循环处理 KV 的 dim2，将 NUM_BLOCK 个 d * e 的 各个 BLOCK 对应的 kv_cache 加载出来，乘以衰减加上 kv_history
#     for i in range(NUM_BLOCK): 
#         block_size = min(n - i * BLOCK, BLOCK)
#         block_decay = tl.exp(-s.to(tl.float32) * block_size)

#         kv_cur = tl.load(KV_block_ptr).to(cal_type)
#         # 先前的 KV[b][h][NUM_BLOCK] 只有 block 
#         # 最终每个 KV[b][h][i]存储的是这个 block 的
#         tl.store(KV_block_ptr, kv_pre.to(kv_type))

#         # 这里将之前的 NUM_BLOCK 的 最后一块 KV_CACHE乘以衰减然后累加，对应了 torch 中的算法实现。
#         kv_pre = block_decay * kv_pre + kv_cur
#         KV_block_ptr += d * e

#     tl.store(KV_HISTORY_block_ptr, kv_pre.to(kv_history_type))

#     # 另一半
#     KV_block_ptr = (KV + kv_offset 
#                     + (tl.arange(0, D_HALF)[:, None] + D_HALF) * e 
#                     + tl.arange(0, e)[None, :])
    
#     # 一次处理一个 b * h 的 kv_history
    
#     KV_HISTORY_block_ptr = (KV_HISTORY + kv_history_offset 
#                             + ( tl.arange(0, D_HALF)[:, None] + D_HALF) * e 
#                             + tl.arange(0, e)[None, :])

#     kv_pre = tl.load(KV_HISTORY_block_ptr).to(cal_type)

#     # 循环处理 KV 的 dim2，将 NUM_BLOCK 个 d * e 的 各个 BLOCK 对应的 kv_cache 加载出来，乘以衰减加上 kv_history
#     for i in range(NUM_BLOCK): 
#         block_size = min(n - i * BLOCK, BLOCK)
#         block_decay = tl.exp(-s.to(tl.float32) * block_size)

#         kv_cur = tl.load(KV_block_ptr).to(cal_type)
#         # 先前的 KV[b][h][NUM_BLOCK] 只有 block 
#         # 最终每个 KV[b][h][i]存储的是这个 block 的
#         tl.store(KV_block_ptr, kv_pre.to(kv_type))

#         # 这里将之前的 NUM_BLOCK 的 最后一块 KV_CACHE乘以衰减然后累加，对应了 torch 中的算法实现。
#         kv_pre = block_decay * kv_pre + kv_cur
#         KV_block_ptr += d * e

#     tl.store(KV_HISTORY_block_ptr, kv_pre.to(kv_history_type))


# Inter-block ("non-diagonal") stage of the lightning-attention forward pass.
# For each query tile it computes q @ KV[inter_block] (the scanned state
# entering that block, produced by _fwd_kv_reduce) scaled by the per-position
# decay exp(-s * (local_pos + 1)), and adds it IN PLACE onto Out. Out must
# already contain the diagonal part, so this kernel has to run after
# _fwd_diag_kernel. Tasks are distributed round-robin over the 1-D grid.
@triton.jit
def _fwd_none_diag_kernel(
        Q,   # (b, h, n, d) queries, contiguous
        Out,   # (b, h, n, e) output, read-modify-written here
        S,   # per-head decay rate, one scalar per head
        KV,   # (b, h, NUM_BLOCK, d, e) scanned per-block states
        #
        b: tl.constexpr, 
        h: tl.constexpr, 
        n: tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        #
        INTER_BLOCK: tl.constexpr,    # outer sequence-block length, e.g. 256
        BLOCK_SIZE_N: tl.constexpr,     # inner tile length along n, e.g. 32
        BLOCK_SIZE_D: tl.constexpr,   # tile size along d
        BLOCK_SIZE_E: tl.constexpr,   # tile size along e
    ):
    
    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_n = inter_block_num * inner_block_num
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head): one per (n tile, e tile) pair
    block_num_per_bh = block_num_n * block_num_e

    # start_program_idx rotates task-0 ownership between (b, h) iterations
    start_program_idx = 0
    start_task_idx = 0
    for bh_idx in tl.range(0, bh_num): 

        h_idx = bh_idx % h
        S_block_idx = S + h_idx
        s = tl.load(S_block_idx).to(cal_type)

        # flat element offsets of this (batch, head) slice
        bh_offs_q = bh_idx * n * d
        bh_offs_o = bh_idx * n * e
        bh_offs_kv = bh_idx * inter_block_num *d * e

        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num

        for task_idx in tl.range(start_task_idx, block_num_per_bh, program_num):
            
            # decode the flat task id into (inter block, inner tile, e tile)
            inner_block_idx = task_idx // block_num_e
            block_idx_e = task_idx % block_num_e
            inter_block_idx = inner_block_idx // inner_block_num
            inner_inter_block_idx = inner_block_idx % inner_block_num

            e_index = block_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)
            # n_index = inner_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            n_index = inter_block_idx * INTER_BLOCK + inner_inter_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            # exclusive n-bound of this inter block, clipped to seq length
            n_index_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)
            d_index_base = tl.arange(0, BLOCK_SIZE_D)

            q_2d_index_base = n_index[:, None] * d + d_index_base[None, :]
            kv_2d_index_base = d_index_base[:, None] * e + e_index[None, :]

            q_ptrs_base = Q + bh_offs_q + q_2d_index_base
            kv_ptrs_base = KV + bh_offs_kv + inter_block_idx * d * e + kv_2d_index_base

            # q @ KV accumulated over d tiles
            qkv_block = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_E], dtype = cal_type)
            for d_idx in tl.range(block_num_d): 
                q_ptrs = q_ptrs_base + d_idx * BLOCK_SIZE_D
                kv_ptrs = kv_ptrs_base + d_idx * BLOCK_SIZE_D * e

                d_index = d_index_base + d_idx * BLOCK_SIZE_D

                # mask_q = n_index[:, None] < n and d_index[None, :] < d
                mask_q = n_index[:, None] < n_index_bound and d_index[None, :] < d
                mask_kv = d_index[:, None] < d and e_index[None, :] < e

                q = tl.load(q_ptrs, mask = mask_q, other = 0.0).to(cal_type)
                kv = tl.load(kv_ptrs, mask = mask_kv, other = 0.0).to(cal_type)
                qkv_block += tl.dot(q, kv)


            # per-row decay: local position within the inter block, 1-based
            inner_block_array = inner_inter_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + 1
            q_decay = tl.exp(-s * inner_block_array[:, None])
            qkv_none_diag_block = qkv_block * q_decay

            o_2d_index_base = n_index[:, None] * e + e_index[None, :]
            # mask_o = n_index[:, None] < n and e_index[None, :] < e
            mask_o = n_index[:, None] < n_index_bound and e_index[None, :] < e
            o_ptrs = Out + bh_offs_o + o_2d_index_base
            
            # read-modify-write: add onto the diagonal part already in Out
            qkv_diag_block = tl.load(o_ptrs, mask = mask_o, other = 0.0).to(cal_type)
            out_block = (qkv_diag_block + qkv_none_diag_block).to(out_type)
            tl.store(o_ptrs, out_block, mask = mask_o)

        # rotate task-0 ownership for the next (batch, head)
        start_program_idx = (start_program_idx + block_num_per_bh) % program_num
            
        




# @triton.jit
# def _fwd_none_diag_kernel_old(
#     Q,  # Q.shape = (b, h, n, d)
#     Out,    # Out.shape = (b, h, n, e)
#     S,  # S.shape = (h, )
#     KV,
#     b: tl.constexpr,
#     h: tl.constexpr,
#     n: tl.constexpr,
#     d: tl.constexpr,
#     e: tl.constexpr,
#     BLOCK: tl.constexpr,
#     NUM_BLOCK: tl.constexpr,
#     CBLOCK: tl.constexpr,
#     NUM_CBLOCK: tl.constexpr,
# ):
#     # grid = (b * h, NUM_BLOCK * NUM_CBLOCK)
#     cal_type = tl.float32
#     out_type = Out.dtype.element_ty

#     off_bh = tl.program_id(0)
#     off_h = off_bh % h 

#     off_nc = tl.program_id(1)
#     off_n = off_nc // NUM_CBLOCK
#     off_c = off_nc % NUM_CBLOCK 
#     off_e = tl.program_id(2) # 0

#     n_offset = off_n * BLOCK
#     c_offset = off_c * CBLOCK
#     e_offset = off_e * e
#     block_offset = n_offset + c_offset

#     q_offset = off_bh * n * d + (n_offset + c_offset) * d
#     o_offset = off_bh * n * e + (n_offset + c_offset) * e + e_offset
#     kv_offset = off_bh * NUM_BLOCK * d * e + off_n * d * e + e_offset

#     Q_block_ptr = (Q + q_offset + tl.arange(0, CBLOCK)[:, None] * d +
#                    tl.arange(0, d)[None, :])
#     O_block_ptr = (Out + o_offset + tl.arange(0, CBLOCK)[:, None] * e +
#                    tl.arange(0, e)[None, :])
#     KV_block_ptr = (KV + kv_offset + tl.arange(0, d)[:, None] * e +
#                     tl.arange(0, e)[None, :])

#     S_block_ptr = S + off_h
#     s = tl.load(S_block_ptr).to(cal_type)

#     # FIXME 这里漏了一此衰减（已经加1）
#     c_array = tl.arange(0, CBLOCK) + 1

#     kv = tl.load(KV_block_ptr).to(cal_type)
#     q_index = block_offset + tl.arange(0, CBLOCK)

#     q = tl.load(Q_block_ptr, mask=q_index[:, None] < n,
#                 other=0.).to(cal_type)

#     q_decay = tl.exp(-s * (off_c * CBLOCK + c_array[:, None]))

#     qkv_none_diag = tl.dot(q, kv) * q_decay

#     qkv_diag = tl.load(O_block_ptr, mask=q_index[:, None] < n,
#                        other=0.).to(cal_type)

#     qkv = qkv_diag + qkv_none_diag

#     tl.store(O_block_ptr,
#              qkv.to(out_type),
#              mask=q_index[:, None] < n)



def lightning_attention(q, k, v, s, block_size, kv_history):
    """Compute lightning attention (causal linear attention with decay).

    The sequence is tiled into blocks of ``block_size`` tokens and the
    forward pass runs as three Triton-kernel stages:

      1. ``_fwd_diag_kernel`` writes the causal intra-block ("diagonal")
         contribution into the output.
      2. ``_fwd_kv_parallel`` builds a decayed key-value outer-product state
         per block, then ``_fwd_kv_reduce`` performs an exclusive prefix
         scan over those blocks, folding in ``kv_history``.
      3. ``_fwd_none_diag_kernel`` adds the inter-block contribution
         ``q @ kv_state`` (with per-position decay) onto the output in place.

    Args:
        q: Query tensor of shape [batch, heads, seq_len, dim].
        k: Key tensor of shape [batch, heads, seq_len, dim].
        v: Value tensor of shape [batch, heads, seq_len, dim_v].
        s: Per-head decay rate tensor of shape [heads]; an already
            broadcast (1, heads, 1, 1) tensor is accepted as well.
        block_size: Sequence-block length used for tiling.
        kv_history: Optional [batch, heads, dim, dim_v] key-value state from
            previous calls, or None to start from zeros. The caller's tensor
            is not mutated; a clone is used internally.

    Returns:
        output: Attention output of shape [batch, heads, seq_len, dim_v],
            cast back to the dtype of ``q``.
        kv_history: Updated key-value state, cast to the dtype of ``q``.
    """
    cal_type = torch.float32
    input_type = q.dtype

    # The kernels address memory via flat pointer arithmetic, so every
    # operand must be contiguous.
    q = q.contiguous()
    k = k.contiguous()
    v = v.contiguous()
    s = s.contiguous()

    # b -> batch, h -> heads, n -> tokens, d -> q/k dim, e -> v dim
    b, h, n, d = q.shape
    e = v.shape[-1]

    # Normalize s to a broadcast-friendly (1, h, 1, 1) view; the kernels
    # index it flat, one scalar per head.
    if s.dim() == 1:
        s = s.view(1, -1, 1, 1)

    if kv_history is None:
        kv_history = torch.zeros((b, h, d, e), dtype=cal_type, device=q.device)
    else:
        kv_history = kv_history.clone().contiguous().to(cal_type)

    o = torch.empty((b, h, n, e), dtype=cal_type, device=q.device)

    # Split the n tokens into NUM_BLOCK outer blocks of BLOCK tokens; the
    # kernels tile each outer block into CBLOCK-sized inner tiles.
    BLOCK = block_size
    NUM_BLOCK = triton.cdiv(n, BLOCK)
    CBLOCK = 64

    # Per-head key-decay table for one block: the token at 1-based position
    # i gets weight exp(-s * (BLOCK - i)), so earlier keys are decayed more
    # when a block's state is carried forward. Shape broadcasts to
    # (1, h, 1, BLOCK); _fwd_kv_parallel indexes it flat as (h, BLOCK).
    array = torch.arange(0, BLOCK, device=q.device) + 1
    k_decay = torch.exp(-s.to(cal_type) * (BLOCK - array.reshape(1, -1)))

    # NOTE(review): the grid sizes below are hard-coded; presumably they
    # match the core counts of the target Ascend NPU (cf. torch_npu import)
    # — confirm before porting to other hardware.

    # Stage 1: causal intra-block contribution -> o.
    grid = (20, )
    _fwd_diag_kernel[grid](
        q, k, v, o, s,
        b, h, n, d, e,
        INTER_BLOCK=BLOCK,
        BLOCK_SIZE_N=CBLOCK,
        BLOCK_SIZE_D=128,
        BLOCK_SIZE_E=128,
    )

    # Stage 2a: per-block decayed k^T @ v states.
    kv = torch.empty((b, h, NUM_BLOCK, d, e), dtype=cal_type, device=q.device)
    grid = (20, )
    _fwd_kv_parallel[grid](
        k, v, k_decay, kv,
        b, h, n, d, e,
        INTER_BLOCK=BLOCK,
        BLOCK_SIZE_N=CBLOCK,
        BLOCK_SIZE_D=128,
        BLOCK_SIZE_E=128,
    )

    # Stage 2b: exclusive prefix scan — kv[:, :, i] becomes the state
    # entering block i, and kv_history is advanced past all n tokens.
    grid = (40, )
    _fwd_kv_reduce[grid](
        s, kv, kv_history,
        b, h, n, d, e,
        INTER_BLOCK=BLOCK,
        BLOCK_SIZE_D=64,
        BLOCK_SIZE_E=64,
    )

    # Stage 3: add the inter-block contribution onto o in place.
    grid = (20, )
    _fwd_none_diag_kernel[grid](
        q, o, s, kv,
        b, h, n, d, e,
        INTER_BLOCK=BLOCK,
        BLOCK_SIZE_N=CBLOCK,
        BLOCK_SIZE_D=128,
        BLOCK_SIZE_E=128,
    )

    return o.to(input_type), kv_history.to(input_type)


# lightning_attention_ = _attention.apply

