import torch
import torch_npu
# from einops import rearrange
import triton
import triton.language as tl

@triton.jit
def _fwd_diag_kernel(
        Q, 
        K, 
        V, 
        Out, 
        S, 
        #
        b: tl.constexpr, 
        h: tl.constexpr, 
        n: tl.constexpr,
        d: tl.constexpr, 
        e: tl.constexpr, 
        #
        BLOCK: tl.constexpr,    # 256
        NUM_BLOCK: tl.constexpr,    # tl.cdiv(n, BLOCK)
        CBLOCK: tl.constexpr    # 32
    ):
    # Intra-block ("diagonal") part of lightning attention.
    # Each program handles one CBLOCK row-slice of one BLOCK for one
    # (batch, head) pair: it computes the causal, decay-weighted attention
    # of those CBLOCK queries against every key/value CBLOCK at or before
    # it inside the same BLOCK, and writes the partial result to Out.
    # Layouts: Q/K are (b, h, n, d); V/Out are (b, h, n, e); S is (h,),
    # one decay rate per head.
    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    # grid = (b * h * NUM_BLOCK, NUM_CBLOCK)
    off = tl.program_id(0)
    # which (batch, head) pair among the b * h flattened pairs
    off_bh = off // NUM_BLOCK
    # which BLOCK inside the current (batch, head)
    off_block = off % NUM_BLOCK
    # which CBLOCK inside the current BLOCK
    off_cblock = tl.program_id(1)
    # head index in [0, h) within the current batch
    off_h = off_bh % h 

    # start offset of Q and K for the current (batch, head)
    qk_offset = off_bh * n * d
    # start offsets of V and Out for the current (batch, head)
    v_offset = off_bh * n * e
    o_offset = off_bh * n * e

    # BLOCK offsets inside the (n, d) and (n, e) planes
    block_offset = off_block * BLOCK
    qk_block_offset = block_offset * d
    v_block_offset = block_offset * e
    o_block_offset = block_offset * e
    # CBLOCK offsets
    cblock_offset = off_cblock * CBLOCK
    q_cblock_offset = cblock_offset * d
    o_cblock_offset = cblock_offset * e

    Q_block_ptr = (Q + qk_offset + qk_block_offset + q_cblock_offset +
                   tl.arange(0, CBLOCK)[:, None] * d +
                   tl.arange(0, d)[None, :])
    K_block_ptr = (K + qk_offset + qk_block_offset +
                    tl.arange(0, CBLOCK)[:, None] * d +
                    tl.arange(0, d)[None, :])
    V_block_ptr = (V + v_offset + v_block_offset +
                   tl.arange(0, CBLOCK)[:, None] * e +
                   tl.arange(0, e)[None, :])
    O_block_ptr = (Out + o_offset + o_block_offset + o_cblock_offset +
                   tl.arange(0, CBLOCK)[:, None] * e +
                   tl.arange(0, e)[None, :])

    # decay rate of the current head: one scalar per head
    S_block_ptr = S + off_h
    s = tl.load(S_block_ptr).to(cal_type)

    i = off_cblock
    # token offsets covered by the current CBLOCK
    q_index = tl.arange(0, CBLOCK) + i * CBLOCK

    # The mask runs along n; its shape is (CBLOCK, 1) but tl.load
    # broadcasts it, so entire rows are masked out together.
    q = tl.load(Q_block_ptr,
                mask=block_offset + q_index[:, None] < n,
                other=0.0).to(cal_type)

    qkv = tl.zeros([CBLOCK, e], dtype=cal_type)
    # q stays fixed; k/v pointers advance by one CBLOCK per iteration
    for j in range(i + 1):
        kv_index = tl.arange(0, CBLOCK) + j * CBLOCK
        # Positional decay; a query must not attend to future keys:
        # diff = 0: q pos == k pos -> s_index = 0    -> decay = 1
        # diff > 0: q pos >  k pos -> s_index < 0    -> decay < 1
        # diff < 0: q pos <  k pos -> s_index = -inf -> decay = 0
        # diff.shape = s_index.shape = decay.shape = (CBLOCK, CBLOCK)
        diff = q_index[:, None] - kv_index[None, :]
        s_index = s * diff
        s_index = tl.where(diff >= 0, -s_index, float("-inf"))
        decay = tl.exp(s_index)

        k = tl.load(
            K_block_ptr,
            mask=block_offset + kv_index[:, None] < n,
            other=0.0,
        ).to(cal_type)
        v = tl.load(
            V_block_ptr,
            mask=block_offset + kv_index[:, None] < n,
            other=0.0,
        ).to(cal_type)

        k_trans = tl.trans(k)
        qk = tl.dot(q, k_trans) * decay

        qkv += tl.dot(qk, v)

        K_block_ptr += CBLOCK * d
        V_block_ptr += CBLOCK * e

    # Store the result
    tl.store(
        O_block_ptr,
        qkv.to(out_type),
        mask=block_offset + q_index[:, None] < n,
    )


@triton.jit
def _fwd_kv_parallel(
    K,
    V,
    K_decay,    # k_decay.shape = (h, BLOCK)
    KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
    b: tl.constexpr,
    h: tl.constexpr,
    n,
    d: tl.constexpr,
    e: tl.constexpr,
    BLOCK: tl.constexpr,    # 256
    NUM_BLOCK,  # tl.cdiv(n, BLOCK)
    D_FBLOCK: tl.constexpr, # d // num_FBLOCK = d
    E_FBLOCK: tl.constexpr, # e // num_FBLOCK = e
    NUM_FBLOCK: tl.constexpr,   # 1
    CBLOCK: tl.constexpr,  #  32 -> 64
    NUM_CBLOCK: tl.constexpr,   # BLOCK // CBLOCK
):
    # grid = (b * h, NUM_BLOCK)
    # Each program builds the (d, e) KV summary of one BLOCK for one
    # (batch, head) pair: the decay-weighted sum of k_i^T * v_i over the
    # block's tokens, stored into KV[b][h][off_block].

    cal_type = tl.float32
    output_type = KV.dtype.element_ty

    off_bh = tl.program_id(0)
    off_block = tl.program_id(1)

    off_h = off_bh % h 

    block_offset = off_block * BLOCK

    k_block_offset = block_offset * d
    v_block_offset = block_offset * e
    kv_block_offset = off_block * d * e

    k_offset = off_bh * n * d
    v_offset = off_bh * n * e
    kv_offset = off_bh * NUM_BLOCK * d * e

    K_block_ptr = (K + k_offset + k_block_offset +
                    tl.arange(0, CBLOCK)[:, None] * d +
                    tl.arange(0, D_FBLOCK)[None, :])
    V_block_ptr = (V + v_offset + v_block_offset +
                   tl.arange(0, CBLOCK)[:, None] * e +
                   tl.arange(0, E_FBLOCK)[None, :])
    KV_block_ptr = (KV + kv_offset + kv_block_offset +
                    tl.arange(0, D_FBLOCK)[:, None] * e +
                    tl.arange(0, E_FBLOCK)[None, :])

    k_decay_ptr = (K_decay + off_h * BLOCK + tl.arange(0, CBLOCK)[None, :])

    
    kv_index = tl.arange(0, CBLOCK)
    # Example: if this block covers tokens [256, 512), computing token 511
    # needs k256^T*v256 + k257^T*v257 + ... + k511^T*v511 accumulated.
    # This is the mask-free KV cache contributed by this (b, h) block,
    # with each token's term multiplied by its position-dependent power of
    # the per-head decay.
    kv = tl.zeros([D_FBLOCK, E_FBLOCK], dtype=tl.float32)

    # n may not be divisible by BLOCK: compute the last block's true
    # length here; every other block has length BLOCK.
    if off_block == NUM_BLOCK - 1:
        split_n = n - (NUM_BLOCK - 1) * BLOCK
    else:
        split_n = BLOCK
    # For a tail block, split_n may not be divisible by CBLOCK; left_shift
    # is the gap between the CBLOCK-rounded size and split_n.
    left_shift = tl.cdiv(split_n, CBLOCK) * CBLOCK - split_n
    # number of CBLOCKs actually needed for split_n
    num_blocks = min(tl.cdiv(split_n, CBLOCK), NUM_CBLOCK)
    # Skip the decay entries of the absent leading CBLOCKs so the block's
    # last token lines up with K_decay's final entry (exp(0) = 1).
    k_decay_ptr += (NUM_CBLOCK - num_blocks) * CBLOCK

    for j in range(num_blocks):
        left_bound = (1 - j) * left_shift
        # Pointers still advance low-to-high, but shifted back by
        # left_shift; only the first iteration (j == 0,
        # left_bound == left_shift) masks rows, so a tail block's tokens
        # stay aligned with the tail of K_decay.
        # k.shape = v.shape = (CBLOCK, D_FBLOCK) or (CBLOCK - left_shift, E_FBLOCK)
        k = tl.load(K_block_ptr - left_shift * d,
                    mask=kv_index[:, None] >= left_bound,
                    other=0.0).to(cal_type)
        v = tl.load(V_block_ptr - left_shift * e,
                    mask=kv_index[:, None] >= left_bound,
                    other=0.0).to(cal_type)

        k_decay = tl.load(k_decay_ptr).to(cal_type)
        k_trans = tl.trans(k)
        kv += tl.dot(k_trans * k_decay, v)

        K_block_ptr += CBLOCK * d
        V_block_ptr += CBLOCK * e
        k_decay_ptr += CBLOCK

    
    tl.store(KV_block_ptr, kv.to(output_type))


@triton.jit
def _fwd_kv_reduce(
        S, 
        KV, # KV.shape = (b, h, NUM_BLOCK, d, e)
        KV_HISTORY,     # kv_history.shape = (b, h, d, e)
        b: tl.constexpr, 
        h: tl.constexpr, 
        n,
        d: tl.constexpr, 
        e: tl.constexpr, 
        BLOCK: tl.constexpr,
        NUM_BLOCK, 
        D_FBLOCK: tl.constexpr, 
        E_FBLOCK: tl.constexpr
    ):
    # grid = (b * h, NUM_FBLOCK = 1)
    # Sequential prefix scan over one (batch, head) pair's per-block KV
    # summaries: rewrites KV[b][h][i] to the KV state *entering* block i
    # (history included) and stores the final accumulated state back into
    # KV_HISTORY.
    cal_type = tl.float32
    kv_type = KV.dtype.element_ty
    kv_history_type = KV_HISTORY.dtype.element_ty
    
    off_bh = tl.program_id(0)
    off_h = off_bh % h 

    kv_offset = off_bh * NUM_BLOCK * d * e

    KV_block_ptr = (KV + kv_offset + tl.arange(0, D_FBLOCK)[:, None] * e +
                    tl.arange(0, E_FBLOCK)[None, :])

    s_ptrs = S + off_h
    s = tl.load(s_ptrs).to(cal_type)

    # one (b, h) pair's kv_history is handled per program
    kv_history_offset = off_bh * d * e
    KV_HISTORY_block_ptr = (KV_HISTORY + kv_history_offset +
                            tl.arange(0, D_FBLOCK)[:, None] * e +
                            tl.arange(0, E_FBLOCK)[None, :])

    kv_pre = tl.load(KV_HISTORY_block_ptr).to(cal_type)

    # Walk KV's dim 2: load each of the NUM_BLOCK per-block (d, e)
    # summaries, decay the running prefix by the block length, and add.
    for i in range(NUM_BLOCK): 
        # length of block i (the last block may be shorter than BLOCK)
        block_size = min(n - i * BLOCK, BLOCK)
        block_decay = tl.exp(-s.to(tl.float32) * block_size)

        kv_cur = tl.load(KV_block_ptr).to(cal_type)
        # Before this store KV[b][h][i] holds block i's own summary;
        # afterwards it holds the prefix state entering block i.
        tl.store(KV_block_ptr, kv_pre.to(kv_type))

        # Decay the accumulated prefix across the current block, then add
        # this block's summary — mirrors the reference torch algorithm.
        kv_pre = block_decay * kv_pre + kv_cur
        KV_block_ptr += d * e

    tl.store(KV_HISTORY_block_ptr, kv_pre.to(kv_history_type))


@triton.jit
def _fwd_none_diag_kernel(
    Q,  # Q.shape = (b, h, n, d)
    Out,    # Out.shape = (b, h, n, e)
    S,  # S.shape = (h, )
    KV,
    b: tl.constexpr,
    h: tl.constexpr,
    n,
    d: tl.constexpr,
    e: tl.constexpr,
    BLOCK: tl.constexpr,
    NUM_BLOCK,
    E_FBLOCK: tl.constexpr,
    CBLOCK: tl.constexpr,
    NUM_CBLOCK: tl.constexpr,
):
    # grid = (b * h, NUM_BLOCK * NUM_CBLOCK)
    # Inter-block ("non-diagonal") part: each program adds, for one CBLOCK
    # of queries, the contribution of all earlier blocks via the prefix KV
    # state in KV[b][h][off_n] (produced by _fwd_kv_reduce), on top of the
    # diagonal partial result already stored in Out.
    cal_type = tl.float32
    out_type = Out.dtype.element_ty

    off_bh = tl.program_id(0)
    off_h = off_bh % h 

    off_nc = tl.program_id(1)
    off_n = off_nc // NUM_CBLOCK
    off_c = off_nc % NUM_CBLOCK 
    off_e = tl.program_id(2) # 0

    n_offset = off_n * BLOCK
    c_offset = off_c * CBLOCK
    e_offset = off_e * E_FBLOCK
    block_offset = n_offset + c_offset

    q_offset = off_bh * n * d + (n_offset + c_offset) * d
    o_offset = off_bh * n * e + (n_offset + c_offset) * e + e_offset
    kv_offset = off_bh * NUM_BLOCK * d * e + off_n * d * e + e_offset

    Q_block_ptr = (Q + q_offset + tl.arange(0, CBLOCK)[:, None] * d +
                   tl.arange(0, d)[None, :])
    O_block_ptr = (Out + o_offset + tl.arange(0, CBLOCK)[:, None] * e +
                   tl.arange(0, E_FBLOCK)[None, :])
    KV_block_ptr = (KV + kv_offset + tl.arange(0, d)[:, None] * e +
                    tl.arange(0, E_FBLOCK)[None, :])

    S_block_ptr = S + off_h
    s = tl.load(S_block_ptr).to(cal_type)

    # 1-based in-CBLOCK positions: the +1 supplies the extra decay step a
    # query needs relative to the block-entry KV state (original FIXME
    # noted a missed decay that this +1 compensates for).
    c_array = tl.arange(0, CBLOCK) + 1

    kv = tl.load(KV_block_ptr).to(cal_type)
    q_index = block_offset + tl.arange(0, CBLOCK)

    q = tl.load(Q_block_ptr, mask=q_index[:, None] < n,
                other=0.).to(cal_type)

    # decay of each query position relative to the start of its BLOCK
    q_decay = tl.exp(-s * (off_c * CBLOCK + c_array[:, None]))

    qkv_none_diag = tl.dot(q, kv) * q_decay

    # the diagonal kernel's partial result, already written to Out
    qkv_diag = tl.load(O_block_ptr, mask=q_index[:, None] < n,
                       other=0.).to(cal_type)

    qkv = qkv_diag + qkv_none_diag

    tl.store(O_block_ptr,
             qkv.to(out_type),
             mask=q_index[:, None] < n)


class _attention(torch.autograd.Function):
    """Lightning attention forward pass, run as four Triton kernel launches.

    Only ``forward`` is defined in this class; no ``backward`` is
    implemented here.
    """

    @staticmethod
    def forward(ctx, q, k, v, s, kv_history):
        # Accumulate in float32; the output is cast back to q's dtype.
        cal_type = torch.float32
        input_type = q.dtype

        q = q.contiguous()
        k = k.contiguous()
        v = v.contiguous()
        s = s.contiguous()  
        
        # b->batch, h->heads, n->tokens, d->q/k dimension, e->v dimension
        # q.shape = (b, h, n, d)
        # k.shape = (b, h, n, d)
        # v.shape = (b, h, n, e)
        # s.shape = (h,) per-head decay rate
        b, h, n, d = q.shape
        e = v.shape[-1]

        o = torch.empty((b, h, n, e), dtype=cal_type, device=q.device)
        
        # Split the sequence n into blocks of size BLOCK (ceil division);
        # NUM_BLOCK is the number of blocks.
        BLOCK = 256
        NUM_BLOCK = triton.cdiv(n, BLOCK)
        # Split each BLOCK into NUM_CBLOCK sub-blocks (must divide evenly).
        CBLOCK = 32
        NUM_CBLOCK = BLOCK // CBLOCK
        assert BLOCK % CBLOCK == 0, "BLOCK must be a multiple of CBLOCK"

        # values 1..BLOCK, shape (BLOCK,)
        array = torch.arange(0, BLOCK, device=q.device) + 1  
        # array.reshape -> (1, BLOCK); BLOCK - array.reshape(1, -1) runs
        # from BLOCK-1 down to 0, so k_decay.shape = (h, BLOCK) and row i
        # holds exp(-s[i]*(BLOCK-1)) ... exp(0).
        # In the recurrent formulation every step multiplies the previous
        # kv_cache by the decay before adding the current k_i^T * v_i, so
        # a position earlier in the block (smaller index) accumulates more
        # decay factors by the time the block summary is complete.
        k_decay = torch.exp(-s.to(cal_type) * (BLOCK - array.reshape(1, -1)))
        
        # ===========================================
        # 1) intra-block (diagonal) causal attention -> partial output o
        grid = (b * h * NUM_BLOCK, NUM_CBLOCK)
        _fwd_diag_kernel[grid](q,
                               k,
                               v,
                               o,
                               s,
                               #
                               b,
                               h,
                               n,
                               d,
                               e,
                               #
                               BLOCK=BLOCK,
                               NUM_BLOCK=NUM_BLOCK,
                               CBLOCK=CBLOCK)
        # ===========================================
        # 2) per-block KV summaries.
        # Shard factor for the d and e dims (1 = no sharding).
        NUM_FBLOCK = 1
        D_FBLOCK = d // NUM_FBLOCK
        assert d % NUM_FBLOCK == 0
        E_FBLOCK = e // NUM_FBLOCK
        assert e % NUM_FBLOCK == 0

        # a new, larger CBLOCK for the KV kernels
        CBLOCK = 64
        NUM_CBLOCK = BLOCK // CBLOCK
        assert BLOCK % CBLOCK == 0, "BLOCK must be a multiple of CBLOCK"
        # Per-block kv_cache summaries.
        # NOTE(review): dim 2 might be reducible to 1 — confirm.
        kv = torch.empty((b, h, NUM_BLOCK, d, e),
                         dtype=cal_type,
                         device=q.device)
        grid = (b * h, NUM_BLOCK)
        _fwd_kv_parallel[grid](
            k,
            v,
            k_decay,
            kv,
            b,
            h,
            n,
            d,
            e,
            BLOCK=BLOCK,
            NUM_BLOCK=NUM_BLOCK,
            D_FBLOCK=D_FBLOCK,
            E_FBLOCK=E_FBLOCK,
            NUM_FBLOCK=NUM_FBLOCK,
            CBLOCK=CBLOCK,
            NUM_CBLOCK=NUM_CBLOCK,
        )
        # ===========================================
        # 3) prefix scan over block summaries; updates kv (to block-entry
        #    states) and kv_history (in place) with the final state.
        grid = (b * h, NUM_FBLOCK)
        _fwd_kv_reduce[grid](s,
                             kv,
                             kv_history,
                             b,
                             h,
                             n,
                             d,
                             e,
                             BLOCK=BLOCK,
                             NUM_BLOCK=NUM_BLOCK,
                             D_FBLOCK=D_FBLOCK,
                             E_FBLOCK=E_FBLOCK) 
        # ===========================================
        # 4) add the inter-block (non-diagonal) contribution into o.
        grid = (b * h, NUM_BLOCK * NUM_CBLOCK)
        _fwd_none_diag_kernel[grid](
            q,
            o,
            s,
            kv,
            b,
            h,
            n,
            d,
            e,
            BLOCK=BLOCK,
            NUM_BLOCK=NUM_BLOCK,
            E_FBLOCK=E_FBLOCK,
            CBLOCK=CBLOCK,
            NUM_CBLOCK=NUM_CBLOCK,
        )
        # ===========================================
        # Saved for a potential backward pass (not implemented in this
        # class).
        ctx.save_for_backward(q, k, v, s, kv)
        ctx.BLOCK = BLOCK

        # kv_history was updated in place by _fwd_kv_reduce above.
        return o.to(input_type), kv_history


# Functional entry point for the autograd Function above.
lightning_attention_ = _attention.apply


def lightning_attention(q, k, v, ed, block_size=256, kv_history=None):
    """Apply the lightning attention algorithm to compute attention
    efficiently.

    Args:
        q: Query tensor of shape [batch, heads, seq_len, dim].
        k: Key tensor of shape [batch, heads, seq_len, dim].
        v: Value tensor of shape [batch, heads, seq_len, dim_v].
        ed: Per-head decay rate tensor of shape [heads].
        block_size: Size of blocks for block-sparse attention.
            NOTE(review): currently unused — the kernels use a fixed
            internal BLOCK of 256; confirm before relying on this value.
        kv_history: Optional key-value history of shape
            [batch, heads, dim, dim_v] from previous computations. It is
            cloned before use, so the caller's tensor is never mutated.

    Returns:
        output: Attention output in the dtype of ``q``.
        kv: Updated key-value history (float32).
    """
    d = q.shape[-1]
    e = v.shape[-1]

    # The kernels index the decay tensor flat per head; a (1, h, 1, 1)
    # view keeps the memory contiguous for that flat indexing.
    if ed.dim() == 1:
        ed = ed.view(1, -1, 1, 1)

    if kv_history is None:
        # Fresh sequence: start from an all-zero KV state.
        kv_history = torch.zeros((q.shape[0], q.shape[1], d, e),
                                 dtype=torch.float32,
                                 device=q.device)
    else:
        # The forward pass updates the history in place, so work on a
        # private contiguous copy.
        kv_history = kv_history.clone().contiguous()

    output, kv = lightning_attention_(q, k, v, ed, kv_history)

    return output, kv