import math
import pytest
import torch
import numpy as np
import random
import triton
import triton.language as tl
import os
from triton.runtime.driver import driver

def get_soc_info():
    """Return the device-properties dict of the current Triton device.

    The dict includes the Ascend core counts (e.g. ``num_aicore``,
    ``num_vectorcore``) consumed by ``forward()``.
    """
    # The previous version also called driver.active.get_current_target(),
    # but the result was never used; the query has been dropped.
    device = driver.active.get_current_device()
    return driver.active.utils.get_device_properties(device)

def dense_to_crow_col(x):
    """Convert a dense 0/1 mask into CSR-style (crow_indices, col_indices).

    Accepts a 2-D matrix or a 3-D batch of matrices. Per-matrix column-index
    rows are right-padded with -1 so they stack into one rectangular tensor.
    """
    PAD = -1
    ndim = x.dim()
    assert ndim in (2, 3)
    batched = x if ndim == 3 else x[None]
    csr_mats = [mat.to_sparse_csr() for mat in batched]
    crows = torch.vstack([mat.crow_indices() for mat in csr_mats])
    col_rows = [mat.col_indices() for mat in csr_mats]
    widest = max(len(row) for row in col_rows)
    padded = []
    for row in col_rows:
        filler = row.new_full((widest - row.shape[0],), PAD)
        padded.append(torch.cat([row, filler]))
    cols = torch.vstack(padded)
    if ndim == 2:
        return crows[0], cols[0]
    return crows, cols


def crow_col_to_dense(crows, cols, dtype=torch.float16):
    """Rebuild a dense 0/1 mask from CSR-style (crow_indices, col_indices).

    Inverse of dense_to_crow_col; handles both batched (2-D indices) and
    unbatched (1-D indices) inputs. The result lives on the input's device.
    """
    batched = crows.dim() > 1
    if not batched:
        crows, cols = crows[None], cols[None]
    out_device = crows.device
    # Scalar index arithmetic below is faster on CPU.
    crows, cols = crows.cpu(), cols.cpu()
    n_batch = crows.shape[0]
    n_rows = crows.shape[1] - 1
    n_cols = cols.max() + 1
    dense = torch.zeros((n_batch, n_rows, n_cols), dtype=dtype)
    for b in range(n_batch):
        for r in range(n_rows):
            dense[b, r, cols[b, crows[b, r]:crows[b, r + 1]]] = 1
    result = dense if batched else dense[0]
    return result.to(out_device)

def dense_to_ccol_row(x):
    """Convert a dense 0/1 mask into CSC-style (ccol_indices, row_indices).

    Bug fix: dense_to_crow_col returns a (crows, cols) *tuple*, which has no
    .contiguous() method — the old code raised AttributeError on every call.
    Make each index tensor contiguous individually instead.
    """
    x = x.transpose(-2, -1).contiguous()
    ccol, rows = dense_to_crow_col(x)
    return ccol.contiguous(), rows.contiguous()

def ccol_row_to_dense(ccol, rows, dtype=torch.float16):
    """Rebuild a dense mask from CSC-style (ccol_indices, row_indices).

    Uses transpose(-2, -1) instead of permute(0, 2, 1): for unbatched (1-D)
    indices crow_col_to_dense returns a 2-D tensor, on which the old
    permute(0, 2, 1) raised; transpose(-2, -1) handles both 2-D and 3-D and
    is identical to the old behavior for the batched case.
    """
    return crow_col_to_dense(ccol, rows, dtype).transpose(-2, -1).contiguous()

def get_sparse_attn_mask_homo_head(q_len, N_CTX, dtype, device, block_size=128, local_blocks=4, vert_stride=4, return_dense=False):
    """Build the head-shared block-sparse causal attention layout.

    Keeps the last `local_blocks` diagonals (sliding window) plus every
    `vert_stride`-th block column, under a block-level causal constraint.

    Returns ((crow_indices, col_indices), block_mask_dense, mask_dense);
    mask_dense is None unless return_dense is True.
    """
    with torch.no_grad():
        n_block = triton.cdiv(N_CTX, block_size)
        row_ids = torch.arange(n_block)[:, None]
        col_ids = torch.arange(n_block)[None]
        stripes = (torch.arange(n_block) + 1) % vert_stride == 0
        keep = (row_ids >= col_ids) & ((row_ids - col_ids < local_blocks) | stripes)
        block_mask_dense = keep.to(device).to(dtype)
        # Only the trailing q-rows of the grand layout matter when q is shorter.
        n_block_q = triton.cdiv(q_len, block_size)
        csr = block_mask_dense[-n_block_q:].contiguous().to_sparse_csr()
    layout = (csr.crow_indices(), csr.col_indices())
    if not return_dense:
        return layout, block_mask_dense, None
    # Blow each block bit up to a block_size x block_size tile, then re-apply
    # the element-level causal mask.
    mask_dense = torch.kron(block_mask_dense, block_mask_dense.new_ones((block_size, block_size)))
    causal_mask = torch.tril(torch.ones(N_CTX, N_CTX)).type_as(mask_dense)[-q_len:]
    mask_dense = mask_dense[-q_len:, :N_CTX] * causal_mask
    return layout, block_mask_dense, mask_dense

def get_sparse_attn_mask(q, N_CTX, block_size=64, local_blocks=4, vert_stride=4, homo_head=True, return_dense=False):
    """Build the block-sparse attention layout for q, shared or per head.

    Returns ((crow_indices, col_indices), block_mask_dense, mask_dense);
    mask_dense is None unless return_dense is True. CSR index tensors always
    carry a leading head dimension.
    """
    n_heads, q_len = q.size(1), q.size(2)
    dtype, device = q.dtype, q.device

    if homo_head:
        with torch.no_grad():
            (crow, col), block_mask_dense, mask_dense = get_sparse_attn_mask_homo_head(
                q_len, N_CTX, dtype, device, block_size, local_blocks, vert_stride, return_dense)
            # Broadcast the single layout across heads (views, no copy).
            crow = crow[None].expand(n_heads, crow.shape[0])
            col = col[None].expand(n_heads, col.shape[0])
            if return_dense:
                mask_dense = mask_dense[None].expand(n_heads, *mask_dense.shape)
            return (crow, col), block_mask_dense, mask_dense

    with torch.no_grad():
        n_block = triton.cdiv(N_CTX, block_size)
        row_ids = torch.arange(n_block)[None, :, None]
        col_ids = torch.arange(n_block)[None, None]
        # Each head gets its own phase-shifted vertical stripe pattern.
        head_sliding_step = max(1, int(vert_stride / n_heads))
        stripes = torch.vstack([
            (torch.arange(n_block) + h * head_sliding_step + 1) % vert_stride == 0
            for h in range(n_heads)
        ]).unsqueeze(1)
        keep = (row_ids >= col_ids) & ((row_ids - col_ids < local_blocks) | stripes)
        block_mask_dense = keep.to(device).to(dtype)
        n_block_q = triton.cdiv(q_len, block_size)
        block_mask_dense_output = block_mask_dense[:, -n_block_q:]

    if not return_dense:
        return dense_to_crow_col(block_mask_dense_output), block_mask_dense, None

    # Element-level dense mask: expand block bits, then re-apply causality.
    mask_dense = torch.kron(block_mask_dense, block_mask_dense.new_ones((block_size, block_size)))
    causal_mask = torch.tril(torch.ones(N_CTX, N_CTX)).type_as(mask_dense)[-q_len:]
    mask_dense = mask_dense[..., -q_len:, :N_CTX] * causal_mask[None]
    return dense_to_crow_col(block_mask_dense_output), block_mask_dense, mask_dense

@triton.jit
def _fwd_kernel(
    Q, K, V, sm_scale,
    layout_crow_ptr,
    layout_col_ptr,
    layout_crow_stride_h, layout_crow_stride_m,
    layout_col_stride_h, layout_col_stride_m,
    L, M,
    Out,
    stride_qz, stride_qh, stride_qm, stride_qd,
    stride_kz, stride_kh, stride_kn, stride_kd,
    stride_vz, stride_vh, stride_vn, stride_vd,
    stride_oz, stride_oh, stride_om, stride_od,
    Z, H, N_CTX,
    PAST_LEN,
    Q_ROUNDED_LEN,
    BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
    BLOCK_N: tl.constexpr,
    EVEN_M_BLOCK: tl.constexpr,
    EVEN_N_BLOCK: tl.constexpr,
    NUM_DBLOCKS: tl.constexpr,
):
    # Block-sparse attention forward kernel (online-softmax / flash style).
    # Grid: (ceil(q_len / BLOCK_M), batch * heads). Each program computes one
    # BLOCK_M tile of query rows for one (batch, head) pair, visiting only the
    # key/value column blocks listed in the CSR layout row for that tile.
    # L and M receive the per-row softmax denominator and running max.
    # The head dimension is processed in NUM_DBLOCKS slices of BLOCK_DMODEL.
    Q_LEN = N_CTX - PAST_LEN
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_h = off_hz % H
    off_z = off_hz // H
    # Advance base pointers to this (batch, head).
    Q += off_z * stride_qz + off_h * stride_qh
    K += off_z * stride_kz + off_h * stride_kh
    V += off_z * stride_vz + off_h * stride_vh

    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_DMODEL)
    off_q = offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qd
    # NOTE(review): off_k is built from the V strides (stride_vn/stride_vd),
    # not the K strides; this is only correct while k and v share the same
    # memory layout — TODO confirm. The commented line below is the
    # K-stride (pre-transposed) alternative.
    off_k = offs_n[:, None] * stride_vn + offs_d[None, :] * stride_vd
    # off_k = offs_n[None, :] * stride_kn + offs_d[:, None] * stride_kd
    off_v = offs_n[:, None] * stride_vn + offs_d[None, :] * stride_vd

    q_ptrs = Q + off_q
    k_ptrs = K + off_k
    v_ptrs = V + off_v

    # Online-softmax state: running max m_i, running denominator l_i, and the
    # fp32 output accumulator(s) — one per BLOCK_DMODEL slice of the head dim.
    m_i = tl.full([BLOCK_M], float('-inf') ,dtype=tl.float32)
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
    acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
    if NUM_DBLOCKS >= 2:
        acc2 = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)

    # Load the query tile once; mask tail rows when q_len % BLOCK_M != 0.
    if EVEN_M_BLOCK:
        q = tl.load(q_ptrs)
        if NUM_DBLOCKS >= 2:
            q2 = tl.load(q_ptrs + BLOCK_DMODEL * stride_qd)
    else:
        q = tl.load(q_ptrs, mask=offs_m[:, None] < Q_LEN)
        # A second load covering the upper BLOCK_DMODEL slice of the head dim.
        if NUM_DBLOCKS >= 2:
            q2 = tl.load(q_ptrs + BLOCK_DMODEL * stride_qd, mask=offs_m[:, None] < Q_LEN)

    # CSR row slice [start_l, end_l) lists the key-block columns this query
    # row tile attends to.
    layout_ptr = layout_crow_ptr + off_h * layout_crow_stride_h + start_m * layout_crow_stride_m
    start_l = tl.load(layout_ptr).to(tl.int32)
    end_l = tl.load(layout_ptr + layout_crow_stride_m).to(tl.int32)

    for col_idx_idx in range(start_l, end_l):
        col_idx = tl.load(layout_col_ptr +  off_h * layout_col_stride_h + col_idx_idx * layout_col_stride_m).to(tl.int32)
        start_n = col_idx * BLOCK_N

        # qk = q @ k^T over this key block (k loaded row-major, transposed).
        if EVEN_N_BLOCK:
            k = tl.load(k_ptrs + start_n * stride_kn)
        else:
            # k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_n[None, :] + start_n < N_CTX)
            k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_n[:, None] + start_n < N_CTX)
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        k_trans = tl.trans(k)
        qk += tl.dot(q, k_trans)

        # Upper head-dim slice contributes to the same qk tile.
        if NUM_DBLOCKS >= 2:
            if EVEN_N_BLOCK:
                k = tl.load(k_ptrs + start_n * stride_kn + BLOCK_DMODEL * stride_kd)
            else:
                # k = tl.load(k_ptrs + start_n * stride_kn + BLOCK_DMODEL * stride_kd, mask=offs_n[None, :] + start_n < N_CTX)
                k = tl.load(k_ptrs + start_n * stride_kn + BLOCK_DMODEL * stride_kd, mask=offs_n[:, None] + start_n < N_CTX)
            k_trans = tl.trans(k)
            qk += tl.dot(q2, k_trans)

        qk *= sm_scale
        # Element-level causal mask within the block; PAST_LEN shifts query
        # rows when q is a suffix of the full context.
        qk = tl.where(offs_m[:, None] + PAST_LEN >= (start_n + offs_n[None, :]), qk, float('-inf'))

        # Online-softmax update: block max, probabilities, block denominator.
        m_ij = tl.max(qk, 1)
        p = tl.exp(qk - m_ij[:, None])
        l_ij = tl.sum(p, 1)

        # Merge block statistics into the running max/denominator.
        m_i_new = tl.maximum(m_i, m_ij)
        alpha = tl.exp(m_i - m_i_new)
        beta = tl.exp(m_ij - m_i_new)
        l_i_new = alpha * l_i + beta * l_ij

        p_scale = beta / l_i_new
        p = p * p_scale[:, None]

        # Rescale the previously accumulated output to the new normalization.
        acc_scale = l_i / l_i_new * alpha

        acc = acc * acc_scale[:, None]
        if NUM_DBLOCKS >= 2:
            acc2 = acc2 * acc_scale[:, None]

        # acc += p @ v for each head-dim slice.
        if EVEN_N_BLOCK:
            v = tl.load(v_ptrs + start_n * stride_vn)
        else:
            v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_n[:, None] + start_n < N_CTX)
        v = v.to(p.dtype)
        acc += tl.dot(p, v)

        if NUM_DBLOCKS >= 2:
            if EVEN_N_BLOCK:
                v = tl.load(v_ptrs + start_n * stride_vn + BLOCK_DMODEL * stride_vd)
            else:
                v = tl.load(v_ptrs + start_n * stride_vn + BLOCK_DMODEL * stride_vd, mask=offs_n[:, None] + start_n < N_CTX)
            v = v.to(p.dtype)
            acc2 += tl.dot(p, v)

        l_i = l_i_new
        m_i = m_i_new

    # Persist per-row softmax statistics (tail rows masked out).
    l_ptrs = L + off_hz * N_CTX + offs_m
    m_ptrs = M + off_hz * N_CTX + offs_m
    if EVEN_M_BLOCK:
        tl.store(l_ptrs, l_i)
        tl.store(m_ptrs, m_i)
    else:
        tl.store(l_ptrs, l_i,  mask=offs_m < Q_LEN)
        tl.store(m_ptrs, m_i,  mask=offs_m < Q_LEN)

    # Write the output tile for each head-dim slice, masking tail rows.
    off_o = off_z * stride_oz + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :] * stride_od
    out_ptrs = Out + off_o
    tl.store(out_ptrs, acc.to(Out.dtype.element_ty),  mask=offs_m[:, None] < Q_LEN)
    if NUM_DBLOCKS >= 2:
        tl.store(out_ptrs + BLOCK_DMODEL * stride_od, acc2.to(Out.dtype.element_ty),  mask=offs_m[:, None] < Q_LEN)

def forward(q, k, v, layout_crow_indices, layout_col_indices, sm_scale, BLOCK_M, BLOCK_N):
    '''
    Launch the block-sparse attention forward kernel and return its output.

    :param q, k, v: [batch, n_heads, seq_len, model_dim]. len of q is allowed to be different than k/v.
    :param layout_crow_indices, layout_col_indices: same as CSR.crow_indices, and CSR.col_indices used to represent a sparse tensor.
        Each element represents a block, i.e. all elements in a block are attended to, or not attended at all.
    :param sm_scale: softmax scaling factor applied to q @ k^T.
    :param BLOCK_M, BLOCK_N: query-row / key-column tile sizes for the kernel.
    :return: output tensor o, same shape as q.
    '''
    assert q.shape[-1] == k.shape[-1] == v.shape[-1]
    assert k.shape[2] == v.shape[2]
    o = torch.empty_like(q).contiguous()
    
    # ====== Ascend Adapter ===== 
    # NOTE(review): the values below are computed but never used by the launch
    # (which uses `grid`); presumably kept for a planned Ascend dispatch path
    # — confirm before removing.
    prop = get_soc_info()
    num_aicore = prop["num_aicore"]
    num_vectorcore = prop["num_vectorcore"]
    grid_cube = (num_aicore, 1, 1)

    m_loops = triton.cdiv(q.shape[2], BLOCK_M)
    n_loops = q.shape[0] * q.shape[1]
    # ====== Ascend ===== 

    # One program per (query row tile, batch*head).
    grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)

    q_rounded_len = grid[0] * BLOCK_M

    # Per-row softmax denominator (L) and running max (m), written by the kernel.
    L = torch.empty((q.shape[0] * q.shape[1], q_rounded_len), device=q.device, dtype=torch.float32)
    m = torch.empty((q.shape[0] * q.shape[1], q_rounded_len), device=q.device, dtype=torch.float32)

    if layout_col_indices.dim() == 1:
        # Head-shared layout: broadcast the single CSR layout across heads.
        layout_crow_indices = layout_crow_indices[None].expand(q.shape[1] , -1)
        layout_col_indices = layout_col_indices[None].expand(q.shape[1] , -1)

    # Head dim is processed in NUM_DBLOCKS slices of BLOCK_DMODEL columns.
    # This is the critical tuning knob: it governs both numerical correctness
    # and unified-buffer (UB) overflow on Ascend — keep it at 64.
    BLOCK_DMODEL = 64

    _fwd_kernel[grid](
        q, k, v, sm_scale,
        layout_crow_indices,
        layout_col_indices,
        layout_crow_indices.stride(0), layout_crow_indices.stride(1),
        layout_col_indices.stride(0), layout_col_indices.stride(1),
        L, m,
        o,
        q.stride(0), q.stride(1), q.stride(2), q.stride(3),
        k.stride(0), k.stride(1), k.stride(2), k.stride(3),
        v.stride(0), v.stride(1), v.stride(2), v.stride(3),
        o.stride(0), o.stride(1), o.stride(2), o.stride(3),
        q.shape[0], q.shape[1], k.shape[2],
        k.shape[2] - q.shape[2],  # PAST_LEN: q may be a suffix of the context
        q_rounded_len,
        BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
        BLOCK_DMODEL=BLOCK_DMODEL,
        EVEN_M_BLOCK=q.shape[2] % BLOCK_M == 0,
        EVEN_N_BLOCK=k.shape[2] % BLOCK_N == 0 ,
        NUM_DBLOCKS=q.shape[-1] // BLOCK_DMODEL,
    )

    return o

def block_sparse_attn(
    q, 
    k, 
    v, 
    N_CTX, 
    BLOCK_SIZE,
    LOCAL_BLOCKS, 
    VERT_STRIDE, 
    HOMO_HEAD,
    sm_scale, 
):
    """Top-level block-sparse attention: build the sparsity layout from the
    hyper-parameters, then run the Triton forward kernel.

    :param q, k, v: [batch, n_heads, seq_len, head_dim] tensors.
    :param N_CTX: total key/value context length.
    :param BLOCK_SIZE: sparsity block size; also used as the kernel tile size.
    :param LOCAL_BLOCKS: number of local (sliding-window) block diagonals kept.
    :param VERT_STRIDE: keep every VERT_STRIDE-th block column.
    :param HOMO_HEAD: share one layout across all heads if True.
    :param sm_scale: softmax scaling applied to q @ k^T.
    :return: attention output, same shape as q.
    """
    # Unused locals from the previous version (dtype, NUM_BLOCK, MAX_SEQ_LEN)
    # have been removed; behavior is unchanged.
    _, block_sparse_pattern, _ = get_sparse_attn_mask(
        q, N_CTX, block_size=BLOCK_SIZE,
        local_blocks=LOCAL_BLOCKS, 
        vert_stride=VERT_STRIDE, 
        homo_head=HOMO_HEAD, 
        return_dense=False
    )

    BLOCK_N = BLOCK_SIZE

    grand_layout_crow_indices, grand_layout_col_indices = dense_to_crow_col(block_sparse_pattern)

    MIN_BLOCK_SIZE = 16
    # forward() uses NUM_DBLOCKS = q.shape[-1] // BLOCK_DMODEL with
    # BLOCK_DMODEL = 64, so only head dims of 64 or 128 are supported.
    assert q.shape[-1] in [64, 128]
    # Larger tiles risk overflowing the on-chip unified buffer (UB) on Ascend.
    assert BLOCK_SIZE in [16, 32, 64]
    assert BLOCK_N >= MIN_BLOCK_SIZE
    BLOCK_M = 16 if q.shape[2] <= 16 else BLOCK_N

    # When q is shorter than k, only the trailing rows of the grand layout apply.
    K_BLOCKS = triton.cdiv(k.shape[2],  BLOCK_SIZE)
    Q_START_BLOCKS = K_BLOCKS - triton.cdiv(q.shape[2], BLOCK_N)
    layout_crow_indices = grand_layout_crow_indices[..., Q_START_BLOCKS:K_BLOCKS+1]
    layout_col_indices = grand_layout_col_indices

    return forward(
        q, k, v, layout_crow_indices,
        layout_col_indices, sm_scale, BLOCK_M, BLOCK_N,
    )