import torch
import triton
import triton.language as tl
import torch_npu
from triton.runtime.driver import driver

def get_soc_info():
    """Query the active NPU's device properties via the Triton driver.

    Returns:
        dict: device properties as reported by the driver, with keys
        ``"max_shared_mem"`` (int), ``"num_aicore"`` (int) and
        ``"num_vectorcore"`` (int).
    """
    # The previous revision also called driver.active.get_current_target(),
    # but never used the result; it has been dropped.
    device = driver.active.get_current_device()
    return driver.active.utils.get_device_properties(device)

@triton.jit
def apply_softcap(S, x):
    """Soft-cap attention scores: returns x * tanh(S / x).

    The tanh is evaluated in the overflow-safe form
        tanh(t) = sign(t) * (1 - e^{-2|t|}) / (1 + e^{-2|t|})
    The naive (e^t - e^{-t}) / (e^t + e^{-t}) form overflows fp32
    (inf/inf -> NaN) once |t| exceeds ~88; here the exponent is always
    non-positive, so e^{-2|t|} is in (0, 1] and the quotient is finite.
    """
    Sdiv = S / x
    e = tl.exp(-2.0 * tl.abs(Sdiv))
    t = (1.0 - e) / (1.0 + e)  # == |tanh(Sdiv)|
    return x * tl.where(Sdiv >= 0, t, -t)

@triton.jit
def _fwd_kernel(
    Q, K, V, Out,
    block_table, kv_ptr,
    scale, softcap, num_seqs,
    q_len, num_query_heads, q_k_ratio, 
    block_table_stride,
    stride_q_0, stride_q_1, stride_q_2, stride_q_3,
    stride_k_0, stride_k_1, stride_k_2, stride_k_3,
    stride_v_0, stride_v_1, stride_v_2, stride_v_3,
    stride_o_0, stride_o_1, stride_o_2, stride_o_3,
    USE_SOFTCAP, SLIDING_WINDOW,
    BLOCK_M: tl.constexpr,
    HEAD_DIM: tl.constexpr, BLOCK_SIZE: tl.constexpr,
    CORE_NUM: tl.constexpr, TASKS: tl.constexpr,
):
    """Paged-attention forward kernel (flash-attention style online softmax).

    Layouts (inferred from the strides passed by the host wrapper):
      Q/Out: (num_seqs, q_len, num_query_heads, HEAD_DIM)
      K/V:   (num_blocks, BLOCK_SIZE, num_kv_heads, HEAD_DIM) paged cache,
             indexed indirectly through `block_table`.
    kv_ptr holds the per-sequence total KV length (context + query).

    Each of the CORE_NUM program instances strides over the flat task space
    TASKS = num_m_blocks * num_seqs * num_query_heads; one task computes one
    (query-row block, sequence, head) output tile.
    """
    core_id = tl.program_id(0)
    hz_cnt = num_seqs * num_query_heads
    # Grid-stride loop over tasks: task id = start_m * hz_cnt + off_hz.
    for idx in range(core_id, TASKS, CORE_NUM):
        start_m = idx // hz_cnt
        off_hz = idx % hz_cnt
        off_h = off_hz % num_query_heads
        off_z = off_hz // num_query_heads
        # GQA/MQA: q_k_ratio query heads share one KV head.
        off_kv_h = off_h // q_k_ratio
        Q_base = Q + off_z * stride_q_0 + off_h * stride_q_2
        
        offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
        offs_d = tl.arange(0, HEAD_DIM)
        off_q = offs_m[:, None] * stride_q_1 + offs_d[None, :] * stride_q_3

        # Online-softmax running state per query row:
        # m_i = running max of scores, l_i = running sum of exp(score - m_i),
        # acc = un-normalized weighted sum of V rows.
        # (l_i's initial 1.0 is irrelevant: alpha == 0 on the first block
        # because m_i starts at -inf.)
        m_i = tl.full([BLOCK_M], float('-inf') ,dtype=tl.float32)
        l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32)
        acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)

        # Rows past the end of the query are masked out on load and store.
        q = tl.load(Q_base + off_q, mask=offs_m[:, None] < q_len)

        kv_len = tl.load(kv_ptr + off_z).to(tl.int32)
        
        # Number of KV tokens that precede the query chunk (prefix context).
        context_len = kv_len - q_len

        num_blocks = tl.cdiv(kv_len, BLOCK_SIZE)

        for j in range(0, num_blocks):
            # Indirect lookup: logical KV block j -> physical cache block.
            physical_block_idx = tl.load(block_table + off_z * block_table_stride + j).to(tl.int32)
            offs_n = tl.arange(0, BLOCK_SIZE)
            
            off_k = (physical_block_idx * stride_k_0 
                    + off_kv_h * stride_k_2
                    + offs_n[:, None] * stride_k_1
                    + offs_d[None, :] * stride_k_3)
            off_v = (physical_block_idx * stride_v_0 
                    + off_kv_h * stride_v_2
                    + offs_n[:, None] * stride_v_1
                    + offs_d[None, :] * stride_v_3)
            
            # NOTE(review): these masks are always true (offs_n < BLOCK_SIZE by
            # construction), so tail positions past kv_len are loaded as-is;
            # they are excluded later via the -inf score mask on qk.
            k = tl.load(K + off_k, mask=offs_n[:, None] < BLOCK_SIZE, other=0.0)

            v = tl.load(V + off_v, mask=offs_n[:, None] < BLOCK_SIZE, other=0.0)

            qk = tl.zeros(shape=(BLOCK_M, BLOCK_SIZE), dtype=tl.float32)

            k_trans = tl.trans(k)
            qk += tl.dot(q, k_trans)
            qk *= scale

            # Optional logit soft-capping (e.g. Gemma-style tanh cap).
            if USE_SOFTCAP:
                qk = apply_softcap(qk, softcap)

            # Causal mask: query row m sits at absolute position
            # context_len + m, and may attend to KV positions <= that.
            # Also mask query rows past q_len.
            kv_offsets = j * BLOCK_SIZE + offs_n
            boundary = context_len + offs_m[:, None]
            kv_mask = kv_offsets[None, :] <= boundary
            q_mask = offs_m < q_len
            full_mask = q_mask[:, None] & kv_mask
            qk = tl.where(full_mask, qk, float("-inf"))
            
            # Sliding-window attention: only the last SLIDING_WINDOW
            # positions (inclusive of self) remain visible.
            if SLIDING_WINDOW > 0:
                sliding_diff = boundary - kv_offsets[None, :]
                sliding_cond = sliding_diff < SLIDING_WINDOW
                qk = tl.where(sliding_cond, qk, float("-inf"))

            # Online softmax update (flash-attention rescaling).
            m_ij = tl.max(qk, axis=1)
            m_j = tl.maximum(m_i, m_ij)
            # Clamp -inf to 0 so fully-masked rows compute exp(-inf - 0) = 0
            # instead of exp(-inf - (-inf)) = NaN.
            m_j = tl.where(m_j > float("-inf"), m_j, 0.0)
            p = tl.exp(qk - m_j[:, None])
            l_j = tl.sum(p, axis=1)
            alpha = tl.exp(m_i - m_j)
            # Rescale previous accumulator/normalizer to the new max,
            # then fold in this block's contribution.
            acc = acc * alpha[:, None]
            l_i = l_i * alpha + l_j
            m_i = m_j

            p = p.to(v.dtype)
            acc += tl.dot(p, v)

        # Final normalization by the softmax denominator.
        acc = acc / l_i[:, None]

        off_o = off_z * stride_o_0 + off_h * stride_o_2 + offs_m[:, None] * stride_o_1 + offs_d[None, :] * stride_o_3
        tl.store(Out + off_o, acc.to(Out.dtype.element_ty), mask=offs_m[:, None] < q_len)


def unified_attn(
    q, k, v, kv_lens, block_table, scale,
    window_size, softcap
):
    """Launch the paged-attention forward kernel on the NPU.

    Args:
        q: query tensor, shape (num_seqs, q_len, num_query_heads, head_size).
        k, v: paged KV cache, shape (num_blocks, block_size, num_kv_heads,
            head_size).
        kv_lens: per-sequence total KV length (context + query) tensor.
        block_table: (num_seqs, max_blocks) logical->physical block mapping.
        scale: softmax scaling applied to q @ k^T.
        window_size: pair whose first element is the left window; a value of
            -1 disables sliding-window masking (SLIDING_WINDOW becomes 0).
        softcap: logit soft-cap; <= 0 disables soft-capping.

    Returns:
        Output tensor with the same shape and dtype as ``q``.
    """
    q = q.contiguous()
    k = k.contiguous()
    v = v.contiguous()
    block_size = v.shape[1]
    batch_size = q.shape[0]
    # q: (num_seqs, q_len, q_heads, head_size)
    num_query_heads = q.shape[2]
    # k: (num_blocks, block_size, k_heads, head_size)
    num_kv_heads = k.shape[2]
    assert num_query_heads % num_kv_heads == 0
    # GQA ratio: query heads per KV head.
    q_k_ratio = num_query_heads // num_kv_heads
    head_size = q.shape[-1]

    # Guard against UB (unified buffer) overflow on the NPU: the kernel
    # materializes BLOCK_SIZE-wide tiles on-chip.
    MIN_BLOCK_SIZE = 16
    MAX_BLOCK_SIZE = 128
    assert MIN_BLOCK_SIZE <= block_size <= MAX_BLOCK_SIZE

    # empty_like preserves q's dtype and device.
    out = torch.empty_like(q)

    BLOCK_M = 16

    prop = get_soc_info()
    num_aicore = prop["num_aicore"]
    # One program per AI core; the kernel grid-strides over TASKS itself.
    grid_cube = (num_aicore, 1, 1)
    TASKS = triton.cdiv(q.shape[1], BLOCK_M) * batch_size * num_query_heads

    _fwd_kernel[grid_cube](
        q, k, v, out,
        block_table, kv_lens, scale, softcap,
        q.shape[0], q.shape[1], q.shape[2], q_k_ratio,
        block_table.stride(0),
        q.stride(0), q.stride(1), q.stride(2), q.stride(3),
        k.stride(0), k.stride(1), k.stride(2), k.stride(3),
        v.stride(0), v.stride(1), v.stride(2), v.stride(3),
        out.stride(0), out.stride(1), out.stride(2), out.stride(3),
        USE_SOFTCAP=(softcap > 0), SLIDING_WINDOW=(1 + window_size[0]),
        BLOCK_M=BLOCK_M, HEAD_DIM=head_size,
        BLOCK_SIZE=block_size, CORE_NUM=num_aicore, TASKS=TASKS
    )
    return out

