import torch
import triton
import triton.language as tl
from triton.runtime.driver import driver

# Presumably a buffer byte budget (192 * 1024 = 196608 bytes).
# NOTE(review): not referenced anywhere in this file — confirm external users
# before removing.
UB_BYTES = 1024 * 192

@triton.jit
def _fwd_diag_kernel(
        Q, 
        K, 
        V, 
        Out, 
        S, 
        #
        b: tl.constexpr, 
        h: tl.constexpr, 
        n: tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        #
        INTER_BLOCK: tl.constexpr,    # 256
        BLOCK_SIZE_N: tl.constexpr,     # 32
        BLOCK_SIZE_D: tl.constexpr, 
        BLOCK_SIZE_E: tl.constexpr, 
    ):
    '''
    Partition the input sequence n by INTER_BLOCK, splitting the causal
    attention output into multiple [INTER_BLOCK, INTER_BLOCK] tiles.
    This kernel computes all diagonal tiles.

    Algorithm description (variables do NOT map to code):
    For INTER_BLOCK i of the current batch, the covered sequence range is
    [i * INTER_BLOCK, (i+1) * INTER_BLOCK) or [i * INTER_BLOCK, n).
    Result: Out[b][h]_j = Q[b][h]_j * ( Σ K[b][h]_k^T * V[b][h]_k^T * e^(-s[h] * (j-k)) ),
    j ∈ [0, n), k ∈ [i * INTER_BLOCK, (i+1) * INTER_BLOCK) or [i * INTER_BLOCK, n), with k <= j.

    Implementation (variables DO map to code):
    Each of the inter_block_num = cdiv(n, INTER_BLOCK) INTER_BLOCKs is split into
    inner_block_num = cdiv(INTER_BLOCK, BLOCK_SIZE_N) inner_blocks.
    The total work is then: for the q row block of each inner_block, locate its
    inter_block_idx and inner_block_idx, and with
    qo_n_index = inter_block_idx * INTER_BLOCK + inner_block_idx * BLOCK_SIZE_N + arange(0, BLOCK_SIZE_N),
    kv_n_index = inter_block_idx * INTER_BLOCK + i * BLOCK_SIZE_N + arange(0, BLOCK_SIZE_N), i ∈ [0, inner_block_idx + 1),
    diff = qo_n_index[:, None] - kv_n_index[None, :],
    compute
    Out[b][h][qo_n_index][e]
    = Σ_i∈ [0, inner_block_idx + 1)(
        Q[b][h][qo_n_index][d]
        * K[b][h][kv_n_index][d]^T
        * exp(where(diff>=0, -s * diff, "-inf"))
        * V[b][h][kv_n_index][e]
    )
    The output can additionally be tiled along the e dimension into
    block_num_e = cdiv(e, BLOCK_SIZE_E) tiles; with
    e_index = tl.arange(0, BLOCK_SIZE_E) + block_idx_e * BLOCK_SIZE_E
    this becomes
    Out[b][h][qo_n_index][e_index]
    = Σ_i∈ [0, inner_block_idx + 1)(
        Q[b][h][qo_n_index][d]
        * K[b][h][kv_n_index][d]^T
        * exp(where(diff>=0, -s * diff, "-inf"))
        * V[b][h][kv_n_index][e_index]
    )
    The Q * K^T product itself can be accumulated over tiles of the reduction
    dimension (here the d dimension), as in a standard tiled matmul. With
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D) tiles, d_idx ∈ [0, block_num_d), and
    d_index_move = arange(0, BLOCK_SIZE_D) + d_idx * BLOCK_SIZE_D:
    Out[b][h][qo_n_index][e_index]
    = Σ_i∈ [0, inner_block_idx + 1)(
        Σ_d_idx(
            Q[b][h][qo_n_index][d_index_move]
            * K[b][h][kv_n_index][d_index_move]^T
        )
        * exp(where(diff>=0, -s * diff, "-inf"))
        * V[b][h][kv_n_index][e_index]
    )
    Note that the last inner_block / inter_block may be shorter than
    BLOCK_SIZE_N / INTER_BLOCK; this is handled through masking:
    inter_block_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK).
    '''
    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    # divide out task
    # batch num = b * h
    # each batch = n * e
    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # task num of each batch
    block_num_per_bh = inter_block_num * inner_block_num * block_num_e
    
    # distribute tasks in each batch
    # cal start_task_idx for current program before distribute tasks
    # update start_program_idx after calculating tasks in this batch
    start_program_idx = 0
    start_task_idx = 0
    # traverse batch
    for bh_idx in tl.range(0, bh_num): 

        # per-head decay rate s = S[h_idx]
        h_idx = bh_idx % h
        S_block_idx = S + h_idx
        s = tl.load(S_block_idx).to(cal_type)

        qk_offs = bh_idx * n * d
        vo_offs = bh_idx * n * e

        # round-robin: offset of this program's first task inside the batch,
        # continuing from where the previous batch's distribution left off
        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num
        
        # traverse tasks and distribute to each program
        for task_idx in tl.range(start_task_idx, block_num_per_bh, program_num): 

            # decompose flat task id into (inter_block, inner_block, e-tile)
            block_idx_e = task_idx % block_num_e
            inner_block_total_idx = task_idx // block_num_e
            inter_block_idx = inner_block_total_idx // inner_block_num
            inner_block_idx = inner_block_total_idx % inner_block_num
            
            # inter block offsets
            inter_block_offs = inter_block_idx * INTER_BLOCK
            inter_block_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)

            # inner block offsets
            inner_block_offs = inner_block_idx * BLOCK_SIZE_N

            # row col index base and ptr base
            qo_n_index = inter_block_offs + inner_block_offs + tl.arange(0, BLOCK_SIZE_N)
            d_index = tl.arange(0, BLOCK_SIZE_D)
            e_index = tl.arange(0, BLOCK_SIZE_E) + block_idx_e * BLOCK_SIZE_E

            q_2d_index_base = qo_n_index[:, None] * d + d_index[None, :]
            o_2d_index = qo_n_index[:, None] * e + e_index[None, :]

            Q_block_ptr_base = Q + qk_offs + q_2d_index_base
            O_block_ptr = Out + vo_offs + o_2d_index

            # cal task: 
            qkv = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_E], dtype = cal_type)
            # traverse current and previous inner_block of k v synchronously in the same inter_block as q         
            for i in tl.range(1 + inner_block_idx): 
                
                # row col index and ptr 
                kv_n_index = inter_block_offs + i * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
                k_2d_index_base = kv_n_index[:, None] * d + d_index[None, :]
                v_2d_index = kv_n_index[:, None] * e + e_index[None, :]

                K_block_ptr_base = K + qk_offs + k_2d_index_base
                V_block_ptr = V + vo_offs + v_2d_index

                # causal attn mask + decay:
                # q must not attend to future key positions
                # diff = 0: q pos = k pos, s_index = 0,    decay = 1
                # diff > 0: q pos > k pos, s_index < 0,    decay < 1
                # diff < 0: q pos < k pos, s_index = -inf, decay = 0
                diff = qo_n_index[:, None] - kv_n_index[None, :]
                s_index = s * diff
                s_index = tl.where(diff >= 0, -s_index, float("-inf"))
                decay = tl.exp(s_index)

                # q k matmul, accumulated over d tiles
                qk = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_N], dtype = cal_type)
                for d_idx in tl.range(block_num_d): 

                    Q_block_ptr = Q_block_ptr_base + d_idx * BLOCK_SIZE_D
                    K_block_ptr = K_block_ptr_base + d_idx * BLOCK_SIZE_D

                    d_index_move = d_index + d_idx * BLOCK_SIZE_D
                    # mask_q = qo_n_index[:, None] < n and d_index_move[None, :] < d
                    # mask_k = kv_n_index[:, None] < n and d_index_move[None, :] < d
                    mask_q = qo_n_index[:, None] < inter_block_bound and d_index_move[None, :] < d
                    mask_k = kv_n_index[:, None] < inter_block_bound and d_index_move[None, :] < d
                    
                    q = tl.load(Q_block_ptr, mask = mask_q, other = 0.0)#.to(cal_type)
                    k = tl.load(K_block_ptr, mask = mask_k, other = 0.0)#.to(cal_type)
                    k_trans = tl.trans(k)
                    qk += tl.dot(q, k_trans)
                
                qk = qk * decay

                # qk matmul result * v block
                # mask_v = kv_n_index[:, None] < n and e_index[None, :] < e
                mask_v = kv_n_index[:, None] < inter_block_bound and e_index[None, :] < e
                v = tl.load(V_block_ptr, mask = mask_v, other = 0.0).to(cal_type)
                qkv += tl.dot(qk, v)
            
            # mask_o = qo_n_index[:, None] < n and e_index[None, :] < e
            mask_o = qo_n_index[:, None] < inter_block_bound and e_index[None, :] < e
            tl.store(O_block_ptr, qkv.to(out_type), mask = mask_o)

        # advance the round-robin anchor for the next batch
        start_program_idx = (start_program_idx + block_num_per_bh) % program_num
        
@triton.jit
def _fwd_kv_parallel(
    K,  # K.shape = (b, h, n, d)
    V,  # V.shape = (b, h, n, e)
    S, 
    KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
    b: tl.constexpr,
    h: tl.constexpr,
    n: tl.constexpr,
    d: tl.constexpr,
    e: tl.constexpr,
    INTER_BLOCK: tl.constexpr,    # 256
    BLOCK_SIZE_N: tl.constexpr,  #  32 -> 64
    BLOCK_SIZE_D: tl.constexpr, 
    BLOCK_SIZE_E: tl.constexpr, 
):
    '''
    Partition the input sequence n by INTER_BLOCK, splitting the causal
    attention output into multiple [INTER_BLOCK, INTER_BLOCK] tiles, and
    compute the kv_cache tile by tile for later accumulation.

    Algorithm description (variables do not map to code):
    For INTER_BLOCK i of the current batch, covering
    j ∈ [i * INTER_BLOCK, (i+1) * INTER_BLOCK) or [i * INTER_BLOCK, n):
    KV[b][h][i] = Σ_j (k_j^T * v_j^T * exp(-s * ( min((i+1)*INTER_BLOCK, n) - j - 1)) )
    i.e. each key contribution is decayed relative to the last valid position
    of its own inter block.
    '''
    cal_type = tl.float32
    out_type = KV.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head, inter block): one per (d-tile, e-tile) pair
    block_num_per_bh_inter_block = block_num_d * block_num_e

    start_program_idx = 0
    start_task_idx = 0
    for bh_inter_block_idx in tl.range(0, bh_num * inter_block_num): 

        inter_block_idx = bh_inter_block_idx % inter_block_num
        bh_idx = bh_inter_block_idx // inter_block_num
        h_idx = bh_idx % h

        # per-head decay rate s = S[h_idx]
        S_block_idx = S + h_idx
        s = tl.load(S_block_idx).to(cal_type)

        k_offs = bh_idx * n * d
        v_offs = bh_idx * n * e
        kv_offs = bh_inter_block_idx * d * e
        kv_n_index_base = inter_block_idx * INTER_BLOCK + tl.arange(0, BLOCK_SIZE_N)
        kv_n_index_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)

        # deal with the last inter_block
        inter_block = INTER_BLOCK
        inner_block_actual_num = inner_block_num
        if inter_block_idx == inter_block_num - 1: 
            inter_block = n - inter_block_idx * INTER_BLOCK
            inner_block_actual_num = tl.cdiv(inter_block, BLOCK_SIZE_N)
        # NOTE(review): inter_block_shift is computed but never used below
        inter_block_shift = INTER_BLOCK - inter_block

        # round-robin: offset of this program's first task for this inter block
        start_task_idx = program_idx - start_program_idx
        # FIXME: this line previously (incorrectly) assigned to start_program_idx
        # instead of start_task_idx
        if start_task_idx < 0: 
            start_task_idx += program_num
        
        for task_idx in tl.range(start_task_idx, block_num_per_bh_inter_block, program_num): 
            
            # decompose flat task id into (d-tile, e-tile)
            block_idx_e = task_idx % block_num_e
            block_idx_d = task_idx // block_num_e

            k_d_index = block_idx_d * BLOCK_SIZE_D + tl.arange(0, BLOCK_SIZE_D)
            v_e_index = block_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)

            k_2d_index_base = kv_n_index_base[:, None] * d + k_d_index[None, :]
            v_2d_index_base = kv_n_index_base[:, None] * e + v_e_index[None, :]
            k_block_ptrs_base = K + k_offs + k_2d_index_base
            v_block_ptrs_base = V + v_offs + v_2d_index_base

            # accumulate K^T @ V over the inner blocks of this inter block
            kv_block = tl.zeros([BLOCK_SIZE_D, BLOCK_SIZE_E], dtype = cal_type)
            for inner_block_idx in tl.range(inner_block_actual_num): 
                
                n_move = inner_block_idx * BLOCK_SIZE_N
                k_block_ptrs = k_block_ptrs_base + n_move * d
                v_block_ptrs = v_block_ptrs_base + n_move * e

                kv_n_index = kv_n_index_base + n_move
                # mask_k = kv_n_index[:, None] < n and k_d_index[None, :] < d
                # mask_v = kv_n_index[:, None] < n and v_e_index[None, :] < e
                mask_k = kv_n_index[:, None] < kv_n_index_bound and k_d_index[None, :] < d
                mask_v = kv_n_index[:, None] < kv_n_index_bound and v_e_index[None, :] < e

                k = tl.load(k_block_ptrs, mask = mask_k, other = 0.0)#.to(cal_type)
                k_trans = tl.trans(k)
                v = tl.load(v_block_ptrs, mask = mask_v, other = 0.0).to(cal_type)
                # k_decay: distance of each position to the block's last valid
                # position; out-of-range rows (diff < 0) decay to exactly 0
                diff = kv_n_index_bound - kv_n_index - 1
                s_index = s * diff
                s_index = tl.where(diff >= 0, -s_index, float("-inf"))
                k_decay = tl.exp(s_index)

                kv_block += tl.dot(k_trans * k_decay, v)
            
            kv_2d_index_base = k_d_index[:, None] * e + v_e_index[None, :]
            kv_block_ptrs_base = KV + kv_offs + kv_2d_index_base
            mask_kv = k_d_index[:, None] < d and v_e_index[None, :] < e
            tl.store(kv_block_ptrs_base, kv_block.to(out_type), mask = mask_kv)

        # advance the round-robin anchor for the next (batch, head, inter block)
        start_program_idx = (start_program_idx + block_num_per_bh_inter_block) % program_num

    


@triton.jit
def _fwd_kv_reduce(
        S, 
        KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
        KV_HISTORY,     # kv_history.shape = (b, h, d, e)
        b: tl.constexpr, 
        h: tl.constexpr, 
        n : tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        INTER_BLOCK: tl.constexpr,
        BLOCK_SIZE_D: tl.constexpr, 
        BLOCK_SIZE_E: tl.constexpr, 
    ):
    '''
    Scan-reduce the per-inter-block kv caches in KV (as produced by
    _fwd_kv_parallel), in place.

    For each (batch, head) and each (d, e) tile:
      - the running state kv_pre starts from KV_HISTORY;
      - for every inter block i, in order: KV[b][h][i] is overwritten with the
        kv state *entering* block i (i.e. before block i's own contribution),
        then the state is advanced as
            kv_pre = exp(-s * len(block_i)) * kv_pre + KV[b][h][i]_old,
        where len(block_i) = min(n - i * INTER_BLOCK, INTER_BLOCK);
      - the final running state is written back to KV_HISTORY.
    '''
    cal_type = tl.float32
    kv_type = KV.dtype.element_ty
    kv_history_type = KV_HISTORY.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head): one per (d-tile, e-tile) pair
    block_num_kv = block_num_d * block_num_e

    start_program_idx = 0
    start_task_idx = 0 
    for bh_idx in tl.range(bh_num): 

        # per-head decay rate s = S[h_idx]
        h_idx = bh_idx % h
        s_ptr = S + h_idx
        s = tl.load(s_ptr).to(cal_type)

        kv_offs = bh_idx * inter_block_num * d * e
        kv_history_offs = bh_idx * d * e

        # round-robin: offset of this program's first task inside the batch
        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num

        for task_idx in tl.range(start_task_idx, block_num_kv, program_num): 

            task_idx_e = task_idx % block_num_e
            task_idx_d = task_idx // block_num_e

            index_d = task_idx_d * BLOCK_SIZE_D + tl.arange(0, BLOCK_SIZE_D)
            index_e = task_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)
            index_2d = index_d[:, None] * e + index_e[None, :]

            kv_history_ptrs = KV_HISTORY + kv_history_offs + index_2d
            kv_ptrs_base = KV + kv_offs + index_2d

            # same (d, e) tile mask is valid for both KV and KV_HISTORY
            same_mask = index_d[:, None] < d and index_e[None, :] < e
            kv_pre = tl.load(kv_history_ptrs, mask = same_mask, other = 0.0).to(cal_type)

            # sequential scan over inter blocks
            for inter_block_idx in tl.range(inter_block_num): 

                kv_ptrs = kv_ptrs_base + inter_block_idx * d * e

                # decay factor for the actual (possibly truncated) block length
                inter_block = min(n - inter_block_idx * INTER_BLOCK, INTER_BLOCK)
                inter_block_decay = tl.exp( -s * inter_block)

                kv_cur = tl.load(kv_ptrs, mask = same_mask, other = 0.0)#.to(cal_type)

                # overwrite this block's slot with the state entering the block
                # tl.store(kv_ptrs, kv_pre.to(kv_type), mask = same_mask)
                tl.store(kv_ptrs, kv_pre, mask = same_mask)

                kv_pre = inter_block_decay * kv_pre + kv_cur
            
            # final state becomes the new history
            tl.store(kv_history_ptrs, kv_pre.to(kv_history_type), mask = same_mask)

        # advance the round-robin anchor for the next batch
        start_program_idx = (start_program_idx + block_num_kv) % program_num





@triton.jit
def _fwd_none_diag_kernel(
        Q, 
        Out, 
        Out_diag, 
        S, 
        KV, 
        #
        b: tl.constexpr, 
        h: tl.constexpr, 
        n: tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        #
        INTER_BLOCK: tl.constexpr,    # 256
        BLOCK_SIZE_N: tl.constexpr,     # 32
        BLOCK_SIZE_D: tl.constexpr, 
        BLOCK_SIZE_E: tl.constexpr, 
    ):
    '''
    Compute the non-diagonal (cross inter-block) part of the attention output
    and merge it with the diagonal part.

    For each q row block: multiply Q by the accumulated kv state of all
    previous inter blocks (KV[b][h][inter_block_idx], as rewritten in place by
    _fwd_kv_reduce), scale each row by the decay exp(-s * (local_pos + 1))
    where local_pos is the row's position within its inter block, add the
    diagonal partial result from Out_diag, and store the sum into Out.
    '''
    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    # grid = (20, )
    program_idx = tl.program_id(0)
    program_num = tl.num_programs(0)

    bh_num = b * h
    inter_block_num = tl.cdiv(n, INTER_BLOCK)
    inner_block_num = tl.cdiv(INTER_BLOCK, BLOCK_SIZE_N)
    block_num_n = inter_block_num * inner_block_num
    block_num_d = tl.cdiv(d, BLOCK_SIZE_D)
    block_num_e = tl.cdiv(e, BLOCK_SIZE_E)
    # tasks per (batch, head): one per (row block, e-tile) pair
    block_num_per_bh = block_num_n * block_num_e

    start_program_idx = 0
    start_task_idx = 0
    for bh_idx in tl.range(0, bh_num): 

        # per-head decay rate s = S[h_idx]
        h_idx = bh_idx % h
        S_block_idx = S + h_idx
        s = tl.load(S_block_idx).to(cal_type)

        bh_offs_q = bh_idx * n * d
        bh_offs_o = bh_idx * n * e
        bh_offs_kv = bh_idx * inter_block_num *d * e

        # round-robin: offset of this program's first task inside the batch
        start_task_idx = program_idx - start_program_idx
        if start_task_idx < 0: 
            start_task_idx += program_num

        for task_idx in tl.range(start_task_idx, block_num_per_bh, program_num):
            
            # decompose flat task id into (inter_block, inner_block, e-tile)
            inner_block_idx = task_idx // block_num_e
            block_idx_e = task_idx % block_num_e
            inter_block_idx = inner_block_idx // inner_block_num
            inner_inter_block_idx = inner_block_idx % inner_block_num

            e_index = block_idx_e * BLOCK_SIZE_E + tl.arange(0, BLOCK_SIZE_E)
            # n_index = inner_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            n_index = inter_block_idx * INTER_BLOCK + inner_inter_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
            n_index_bound = min(n, (inter_block_idx + 1) * INTER_BLOCK)
            d_index_base = tl.arange(0, BLOCK_SIZE_D)

            q_2d_index_base = n_index[:, None] * d + d_index_base[None, :]
            kv_2d_index_base = d_index_base[:, None] * e + e_index[None, :]

            q_ptrs_base = Q + bh_offs_q + q_2d_index_base
            kv_ptrs_base = KV + bh_offs_kv + inter_block_idx * d * e + kv_2d_index_base

            # Q @ KV, accumulated over d tiles
            qkv_block = tl.zeros([BLOCK_SIZE_N, BLOCK_SIZE_E], dtype = cal_type)
            for d_idx in tl.range(block_num_d): 
                q_ptrs = q_ptrs_base + d_idx * BLOCK_SIZE_D
                kv_ptrs = kv_ptrs_base + d_idx * BLOCK_SIZE_D * e

                d_index = d_index_base + d_idx * BLOCK_SIZE_D

                # mask_q = n_index[:, None] < n and d_index[None, :] < d
                mask_q = n_index[:, None] < n_index_bound and d_index[None, :] < d
                mask_kv = d_index[:, None] < d and e_index[None, :] < e

                q = tl.load(q_ptrs, mask = mask_q, other = 0.0).to(cal_type)
                kv = tl.load(kv_ptrs, mask = mask_kv, other = 0.0)#.to(cal_type)
                qkv_block += tl.dot(q, kv)

            # note the +1 here: decay power is (position within the inter
            # block) + 1
            inner_block_array = inner_inter_block_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) + 1
            q_decay = tl.exp(-s * inner_block_array[:, None])
            qkv_none_diag_block = qkv_block * q_decay

            o_2d_index = n_index[:, None] * e + e_index[None, :]
            # mask_o = n_index[:, None] < n and e_index[None, :] < e
            mask_o = n_index[:, None] < n_index_bound and e_index[None, :] < e
            o_ptrs = Out + bh_offs_o + o_2d_index
            o_diag_ptrs = Out_diag + bh_offs_o + o_2d_index
            
            # merge diagonal and non-diagonal contributions
            qkv_diag_block = tl.load(o_diag_ptrs, mask = mask_o, other = 0.0)#.to(cal_type)
            out_block = (qkv_diag_block + qkv_none_diag_block).to(out_type)
            tl.store(o_ptrs, out_block, mask = mask_o)

        # advance the round-robin anchor for the next batch
        start_program_idx = (start_program_idx + block_num_per_bh) % program_num
            
        

def lightning_attention(
        q : torch.Tensor, 
        k : torch.Tensor, 
        v : torch.Tensor, 
        s : torch.Tensor, 
        kv_history : torch.Tensor, 
        block_size : int, 
):

    """
    Apply the lightning attention algorithm to compute causal attention
    with per-head exponential decay, via four Triton kernel launches
    (diagonal tiles, per-block kv, kv scan-reduce, off-diagonal merge).

    Args:
        q: Query tensor of shape [batch, heads, seq_len, dim]
        k: Key tensor of shape [batch, heads, seq_len, dim]
        v: Value tensor of shape [batch, heads, seq_len, dim_v]
        s: Per-head decay rate tensor of shape [heads]
        kv_history: Key-value history of shape [batch, heads, dim, dim_v]
            from previous computations, or None to start from a zero state
            (the passed tensor is cloned, not mutated)
        block_size: INTER_BLOCK size used to tile the sequence

    Returns:
        o: Attention output of shape [batch, heads, seq_len, dim_v]
        kv_history: Updated key-value history [batch, heads, dim, dim_v]
        kv: Per-block kv prefix states [batch, heads, NUM_BLOCK, dim, dim_v],
            cast back to the input dtype
    """
    # from triton.runtime.driver import driver
    # NPU hardware params from triton
    # class: self.backend: str, self.arch: str, self.warp_size: int
    target = driver.active.get_current_target() 
    # int
    device = driver.active.get_current_device()
    # dict: {"max_shared_mem": int, "num_aicore": int, "num_vectorcore": int}
    prop = driver.active.utils.get_device_properties(device)

    # NOTE(review): target and max_shared_mem are only referenced by the
    # commented-out debug prints below
    max_shared_mem = prop["max_shared_mem"]
    num_cube_core = prop["num_aicore"]
    num_vector_core = prop["num_vectorcore"]
    # print(f"\nRunning triton on {target.backend} {target.arch}. ")
    # print(device)
    # print(f"Max shared memory = {max_shared_mem}, AICore num = {num_cube_core}, VectorCore num = {num_vector_core}. \n")

    # matmul-heavy kernels launch on cube cores, the scan-reduce on vector cores
    grid_cube = (num_cube_core, )
    grid_vec = (num_vector_core, )

    cal_type = torch.float32
    input_type = q.dtype

    q = q.contiguous()
    k = k.contiguous()
    v = v.contiguous()
    s = s.contiguous()  
    
    # b->batch, h->head, n->tokens, d->q/k dimension, e->v dimension
    # q.shape = (b, h, n, d)
    # k.shape = (b, h, n, d)
    # v.shape = (b, h, n, e)
    # s.shape = (h, )  decay rate per head
    b, h, n, d = q.shape
    e = v.shape[-1]

    # reshape s to (1, h, 1, 1); the kernels index the flat buffer by head, so
    # element h_idx is unchanged by this view
    if s.dim() == 1: 
        s = s.view(1, -1, 1, 1)

    if kv_history is None: 
        kv_history = torch.zeros((q.shape[0], q.shape[1], d, e), 
                                 dtype = input_type, #cal_type, 
                                 device = q.device)
    else: 
        # clone so the caller's history tensor is not mutated in place
        kv_history = kv_history.clone().contiguous()#.to(cal_type)


    # o_diag holds the fp32 diagonal partial result; o is the final output
    o_diag = torch.empty((b, h, n, e), dtype = cal_type, device = q.device)
    o = torch.empty((b, h, n, e), dtype = input_type, device = q.device)
    
    # Split the sequence n into NUM_BLOCK chunks of size BLOCK (ceil division)
    BLOCK = block_size
    NUM_BLOCK = triton.cdiv(n, BLOCK)
    # Each BLOCK is further split into sub-blocks by the fixed tile sizes below
    # (floor division; expected to divide evenly)
    BLOCK_SIZE_N = 64
    BLOCK_SIZE_D_CUBE = 128
    BLOCK_SIZE_E_CUBE = 128
    BLOCK_SIZE_D_VEC = 64
    BLOCK_SIZE_E_VEC = 64
    
    # ===========================================
    # 1) diagonal tiles of the causal attention output
    _fwd_diag_kernel[grid_cube](
                            q,
                            k,
                            v,
                            o_diag,
                            s,
                            #
                            b,
                            h,
                            n,
                            d,
                            e,
                            INTER_BLOCK = BLOCK, 
                            BLOCK_SIZE_N = BLOCK_SIZE_N, 
                            BLOCK_SIZE_D = BLOCK_SIZE_D_CUBE, 
                            BLOCK_SIZE_E = BLOCK_SIZE_E_CUBE
                            )
    # ===========================================
    # 2) per-block kv caches
    kv = torch.empty((b, h, NUM_BLOCK, d, e),
                        dtype=cal_type,
                        device=q.device)

    _fwd_kv_parallel[grid_cube](
        k, 
        v, 
        s, 
        kv, 
        b, 
        h, 
        n, 
        d, 
        e, 
        INTER_BLOCK = BLOCK, 
        BLOCK_SIZE_N = BLOCK_SIZE_N, 
        BLOCK_SIZE_D = BLOCK_SIZE_D_CUBE, 
        BLOCK_SIZE_E = BLOCK_SIZE_E_CUBE, 
    )

    # ===========================================
    # 3) scan-reduce kv in place and fold the final state into kv_history
    _fwd_kv_reduce[grid_vec](
        s, 
        kv, 
        kv_history, 
        b, 
        h, 
        n, 
        d, 
        e, 
        INTER_BLOCK = BLOCK, 
        BLOCK_SIZE_D = BLOCK_SIZE_D_VEC, 
        BLOCK_SIZE_E = BLOCK_SIZE_E_VEC, 
    )
    # ===========================================
    # 4) off-diagonal contribution, merged with o_diag into o
    _fwd_none_diag_kernel[grid_cube](
        q, 
        o, 
        o_diag, 
        s, 
        kv, 
        b, 
        h, 
        n, 
        d, 
        e, 
        INTER_BLOCK = BLOCK, 
        BLOCK_SIZE_N = BLOCK_SIZE_N, 
        BLOCK_SIZE_D = BLOCK_SIZE_D_CUBE, 
        BLOCK_SIZE_E = BLOCK_SIZE_E_CUBE, 
    )
    # ===========================================
    return o, kv_history, kv.to(input_type)
