import math
import pytest
import torch
import numpy as np
import random
import triton
import triton.language as tl
import os

class AtlasA2:
    """Hardware description of an Ascend Atlas A2 SoC.

    Exposes fixed on-chip buffer sizes (in bytes) and the AI-core /
    vector-core counts reported by the active Triton driver.
    """

    def __init__(self):
        self.UB_SIZE = 192 * 1024   # unified buffer
        self.L1_SIZE = 512 * 1024
        self.L0A_SIZE = 64 * 1024
        self.L0B_SIZE = 64 * 1024
        self.L0C_SIZE = 128 * 1024
        # Query the driver once; the original called get_soc_info()
        # twice, issuing a redundant device-properties query.
        soc_info = self.get_soc_info()
        self.NUM_AICORE = soc_info["num_aicore"]
        self.NUM_VECTORCORE = soc_info["num_vectorcore"]

    def get_soc_info(self):
        """Return the device-property dict from the active Triton driver.

        Expected keys: "max_shared_mem", "num_aicore", "num_vectorcore".
        """
        from triton.runtime.driver import driver
        device = driver.active.get_current_device()
        # NOTE: the unused get_current_target() call was removed.
        return driver.active.utils.get_device_properties(device)

class csr_matrix:
    """Minimal CSR (compressed sparse row) view of a dense 2D array.

    Attributes:
        shape: (rows, cols) of the input array.
        data: float tensor of the non-zero values, in row-major order.
        col_indices: int64 tensor, column index of each non-zero.
        crow_indices: int64 tensor of length rows+1; row i's non-zeros
            live in data[crow_indices[i]:crow_indices[i+1]].
    """

    def __init__(self, input_array):
        if not isinstance(input_array, np.ndarray):
            raise ValueError("Input must be a NumPy array")

        self.shape = input_array.shape
        rows, cols = self.shape
        data = []
        col_indices = []
        crow_indices = [0]

        for i in range(rows):
            for j in range(cols):
                if input_array[i, j]:
                    data.append(input_array[i, j])
                    col_indices.append(j)
            crow_indices.append(len(col_indices))

        self.data = torch.Tensor(data)
        # BUG FIX: torch.Tensor(list) yields float32, and float tensors
        # cannot be used as slice bounds / index tensors (this broke the
        # crow_col_to_dense round trip). Indices must be integer dtype.
        self.col_indices = torch.tensor(col_indices, dtype=torch.int64)
        self.crow_indices = torch.tensor(crow_indices, dtype=torch.int64)

def dense_to_crow_col(x: torch.Tensor):
    """Convert a 2D/3D dense mask tensor into CSR crow/col index tensors.

    NOTE: col indices are right-padded with -1 so every batch entry has
    the same width.
    """
    device = x.device
    PAD = -1
    ndim = x.dim()
    assert ndim in (2, 3)
    batched = x if ndim == 3 else x[None]
    mats = [csr_matrix(m.bool().cpu().numpy()) for m in batched]
    crows = torch.vstack([m.crow_indices for m in mats])
    col_list = [m.col_indices for m in mats]
    width = max(c.shape[0] for c in col_list)
    padded = []
    for c in col_list:
        filler = PAD + c.new_zeros(width - c.shape[0])
        padded.append(torch.cat([c, filler]))
    cols = torch.vstack(padded)
    if ndim == 2:
        crows, cols = crows[0], cols[0]
    return crows.to(device), cols.to(device)


def crow_col_to_dense(crows, cols, dtype=torch.float16):
    """Reconstruct a dense 0/1 mask tensor from CSR crow/col indices.

    Accepts 1D (single) or 2D (batched) index tensors; the output width
    is cols.max() + 1, so -1 padding entries are never dereferenced as
    long as they lie outside the crow ranges.
    """
    ndim = crows.dim()
    if ndim == 1:
        crows, cols = crows[None], cols[None]
    device = crows.device
    # The scatter loop below is faster on CPU.
    crows = crows.cpu()
    cols = cols.cpu()
    n_batch = crows.shape[0]
    n_rows = crows.shape[1] - 1
    n_cols = cols.max() + 1
    dense = torch.zeros((n_batch, n_rows, n_cols), dtype=dtype)
    for b in range(n_batch):
        for r in range(n_rows):
            lo, hi = crows[b, r], crows[b, r + 1]
            dense[b, r, cols[b, lo:hi]] = 1
    if ndim == 1:
        dense = dense[0]
    return dense.to(device)

def dense_to_ccol_row(x):
    """Convert a dense mask to CSC (ccol/row) indexing by transposing
    and reusing the CSR converter.

    BUG FIX: dense_to_crow_col returns a (crows, cols) tuple, so the
    original `.contiguous()` on the tuple raised AttributeError; make
    each tensor contiguous individually instead.
    """
    ccol, rows = dense_to_crow_col(x.transpose(-2, -1))
    return ccol.contiguous(), rows.contiguous()

def ccol_row_to_dense(ccol, rows, dtype=torch.float16):
    """Reconstruct a dense mask from CSC (ccol/row) indices.

    BUG FIX: uses transpose(-2, -1) instead of permute(0, 2, 1) so the
    unbatched case (1D ccol -> 2D dense from crow_col_to_dense) works
    too; for the batched 3D case the two are equivalent.
    """
    return crow_col_to_dense(ccol, rows, dtype).transpose(-2, -1).contiguous()

def get_sparse_attn_mask_homo_head(q_len, k_len, dtype, device, block_size=64, local_blocks=4, vert_strides=4, return_dense=False):
    """Build a block-sparse causal attention layout shared by all heads.

    A block (q, k) is kept when it is causal (q >= k) and either within
    `local_blocks` of the diagonal or on a vertical stride of
    `vert_strides`. Returns ((crow_indices, col_indices),
    block_mask_dense, mask_dense), where mask_dense is the element-level
    mask (only when return_dense=True, otherwise None).
    """
    with torch.no_grad():
        num_blocks = triton.cdiv(k_len, block_size)
        blk = torch.arange(num_blocks)
        q_pos = blk[:, None]
        k_pos = blk[None]
        strided = (blk + 1) % vert_strides == 0
        causal = q_pos >= k_pos
        keep = (q_pos - k_pos < local_blocks) | strided
        block_mask_dense = (causal & keep).to(device).to(dtype)
        num_q_blocks = triton.cdiv(q_len, block_size)
        # CSR layout covers only the query rows actually present.
        layout = csr_matrix(block_mask_dense[-num_q_blocks:].contiguous().bool().cpu().numpy())
    mask_dense = None
    if return_dense:
        # Expand each block entry to a block_size x block_size tile, then
        # re-apply the element-level causal triangle.
        ones = block_mask_dense.new_ones((block_size, block_size))
        mask_dense = torch.kron(block_mask_dense, ones)
        causal_mask = torch.tril(torch.ones(k_len, k_len)).type_as(mask_dense)[-q_len:]
        mask_dense = mask_dense[-q_len:, :k_len] * causal_mask
    return (layout.crow_indices, layout.col_indices), block_mask_dense, mask_dense

def get_sparse_attn_mask(q, k_len, block_size=64, local_blocks=4, vert_strides=4, homo_head=True, return_dense=False):
    """Build a block-sparse causal attention layout for all heads of `q`.

    Args:
        q: query tensor; only size(1) (heads), size(2) (q_len), dtype and
            device are read here. Presumably (batch, heads, q_len, dim) —
            consistent with how block_sparse_attention uses it.
        k_len: total key length (may exceed q_len when there is a past).
        block_size: side length of one sparsity block.
        local_blocks: number of block-diagonals kept near the diagonal.
        vert_strides: keep every vert_strides-th block column.
        homo_head: if True, all heads share one layout; otherwise each
            head gets a different vertical-stride phase.
        return_dense: also build the element-level dense mask.

    Returns:
        ((crow_indices, col_indices), block_mask_dense, mask_dense);
        mask_dense is None unless return_dense=True.
    """
    n_heads = q.size(1)
    q_len = q.size(2)
    dtype = q.dtype
    device = q.device
    if homo_head:
        with torch.no_grad():
            (crow, col), block_mask_dense, mask_dense = get_sparse_attn_mask_homo_head(q_len, k_len, dtype, device, block_size, local_blocks, vert_strides, return_dense)
            # Broadcast the single shared layout across all heads (views,
            # no copy).
            crow = crow[None].expand(n_heads, crow.shape[0])
            col = col[None].expand(n_heads, col.shape[0])
            if return_dense:
                mask_dense = mask_dense[None].expand(n_heads, *mask_dense.shape)
            return (crow, col), block_mask_dense, mask_dense

    with torch.no_grad():
        N_BLOCK = triton.cdiv(k_len, block_size)
        # Shapes broadcast to (heads, q_blocks, k_blocks).
        q_pos = torch.arange(N_BLOCK)[None, :, None]
        k_pos = torch.arange(N_BLOCK)[None, None]
        # Each head's vertical stripes are phase-shifted by
        # head_sliding_step so heads cover different block columns.
        head_sliding_step = max(1, int(vert_strides / n_heads))  
        mask_vert_strided = [(torch.arange(N_BLOCK) + h * head_sliding_step + 1) % vert_strides == 0 for h in range(n_heads)]
        mask_vert_strided = torch.vstack(mask_vert_strided).unsqueeze(1)
        # Keep causal blocks that are local or on a strided column.
        block_mask_dense = ((q_pos >= k_pos) & ((q_pos - k_pos < local_blocks) | mask_vert_strided)).to(device).to(dtype)
        N_BLOCK_Q = triton.cdiv(q_len, block_size)
        # Only the query rows actually present go into the CSR layout.
        block_mask_dense_output = block_mask_dense[:, -N_BLOCK_Q:]
    if return_dense:
        # Expand block entries into block_size x block_size tiles, then
        # re-apply the element-level causal triangle.
        mask_dense = torch.kron(block_mask_dense, block_mask_dense.new_ones((block_size, block_size)))
        causal_mask = torch.tril(torch.ones(k_len, k_len)).type_as(mask_dense)[-q_len:]
        mask_dense = mask_dense[..., -q_len:, :k_len] * causal_mask[None]
        return dense_to_crow_col(block_mask_dense_output), block_mask_dense, mask_dense
    else:
        return dense_to_crow_col(block_mask_dense_output), block_mask_dense, None

@triton.jit
def _fwd_kernel(
    Q, K, V, scale,
    layout_crow_ptr,
    layout_col_ptr,
    layout_crow_stride_h, layout_crow_stride_m,
    layout_col_stride_h, layout_col_stride_m,
    Out,
    stride_qz, stride_qh, stride_qm, stride_qd,
    stride_kz, stride_kh, stride_kn, stride_kd,
    stride_vz, stride_vh, stride_vn, stride_vd,
    stride_oz, stride_oh, stride_om, stride_od,
    batch_size, num_query_head, k_len,
    PAST_LEN, q_k_ratio,
    BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr,
    CORE_NUM: tl.constexpr, TASKS: tl.constexpr,
    HEAD_DIM_PADDING: tl.constexpr, BLOCK_SIZE: tl.constexpr,
):
    # Block-sparse attention forward kernel. Each core iterates over a
    # strided slice of the TASKS work items; one task = one BLOCK_M tile
    # of query rows for one (batch, head) pair. The set of key blocks to
    # visit per query tile comes from the CSR layout
    # (layout_crow_ptr / layout_col_ptr). Softmax is computed online
    # (running max m_i, running sum l_i, rescaled accumulator acc) in the
    # style of flash attention.
    Q_LEN = k_len - PAST_LEN
    core_id = tl.program_id(0)
    hz_cnt = batch_size * num_query_head
    for idx in range(core_id, TASKS, CORE_NUM):
        # Decode task index -> (query tile, batch, head).
        start_m = idx // hz_cnt
        off_hz = idx % hz_cnt
        off_h = off_hz % num_query_head
        off_z = off_hz // num_query_head
        # GQA/MQA: q_k_ratio query heads share one KV head.
        off_kv_h = off_h // q_k_ratio
        Q_base = Q + off_z * stride_qz + off_h * stride_qh
        K_base = K + off_z * stride_kz + off_kv_h * stride_kh
        V_base = V + off_z * stride_vz + off_kv_h * stride_vh

        offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
        offs_d = tl.arange(0, HEAD_DIM_PADDING)
        off_q = offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qd

        # Online-softmax state: running row max, running row sum,
        # output accumulator (fp32 for accuracy).
        m_i = tl.full([BLOCK_M], float('-inf') ,dtype=tl.float32)
        l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32)
        acc = tl.zeros([BLOCK_M, HEAD_DIM_PADDING], dtype=tl.float32)

        # Mask out rows past Q_LEN and the head-dim padding columns.
        q = tl.load(Q_base + off_q, mask=(offs_m[:, None] < Q_LEN) & (offs_d[None, :] < HEAD_DIM), other=0.0)

        # crow[start_m] .. crow[start_m+1] bounds the non-zero key
        # blocks for this query tile.
        layout_ptr = layout_crow_ptr + off_h * layout_crow_stride_h + start_m * layout_crow_stride_m
        start_l = tl.load(layout_ptr).to(tl.int32)
        end_l = tl.load(layout_ptr + layout_crow_stride_m).to(tl.int32)

        for col_idx_idx in range(start_l, end_l):
            # Column index of the next non-empty key block.
            col_idx = tl.load(layout_col_ptr + off_h * layout_col_stride_h + col_idx_idx * layout_col_stride_m).to(tl.int32)
            start_n = col_idx * BLOCK_SIZE
            offs_n = tl.arange(0, BLOCK_SIZE)

            qk = tl.zeros([BLOCK_M, BLOCK_SIZE], dtype=tl.float32)
            off_k = (start_n * stride_kn
                     + offs_n[:, None] * stride_kn
                     + offs_d[None, :] * stride_kd)
            
            k = tl.load(K_base + off_k, mask=(offs_n[:, None] + start_n < k_len) & (offs_d[None, :] < HEAD_DIM), other=0.0)
            k_trans = tl.trans(k)
            qk += tl.dot(q, k_trans)

            qk *= scale
            # Element-level causal mask (queries are offset by PAST_LEN)
            # plus key-length bound.
            qk = tl.where((offs_m[:, None] + PAST_LEN >= (start_n + offs_n[None, :])) & (start_n + offs_n[None, :] < k_len), qk, float('-inf'))

            # Online softmax update: new max, rescale old accumulator.
            m_j = tl.maximum(m_i, tl.max(qk, axis=1))
            # Guard fully-masked rows: keep m_j finite so exp() below
            # stays well-defined.
            m_j = tl.where(m_j > float("-inf"), m_j, 0.0)
            p = tl.exp(qk - m_j[:, None])
            l_j = tl.sum(p, axis=1)
            alpha = tl.exp(m_i - m_j)
            acc = acc * alpha[:, None]
            l_i = l_i * alpha + l_j
            m_i = m_j

            off_v = (start_n * stride_vn
                     + offs_n[:, None] * stride_vn
                     + offs_d[None, :] * stride_vd)
        
            v = tl.load(V_base + off_v, mask=(offs_n[:, None] + start_n < k_len) & (offs_d[None, :] < HEAD_DIM), other=0.0)
            # Cast probabilities to V's dtype for the matmul.
            p = p.to(v.dtype)
            acc += tl.dot(p, v)

        # Final softmax normalization. NOTE(review): l_i starts at 1.0,
        # not 0.0, so the denominator carries an extra exp(-m_i) term —
        # presumably intentional (acts like an extra null key); confirm
        # against the reference implementation.
        acc = acc / l_i[:, None]

        off_o = off_z * stride_oz + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :] * stride_od
        out_ptrs = Out + off_o
        tl.store(out_ptrs, acc.to(Out.dtype.element_ty),  mask=(offs_m[:, None] < Q_LEN) & (offs_d[None, :] < HEAD_DIM))

def block_sparse_attention(
    q, 
    k, 
    v, 
    block_size,
    local_blocks, 
    vert_strides, 
    homo_head,
    scale,
    block_sparse_pattern = None
):
    """Launch the block-sparse attention forward kernel.

    Args:
        q, k, v: attention inputs; indexing below implies layout
            (batch, heads, seq_len, head_dim). k/v may be longer than q
            (past/KV-cache case: PAST_LEN = k_len - q_len).
        block_size: sparsity block side; must be a power of two (asserted
            below via next_power_of_2).
        local_blocks, vert_strides, homo_head: layout parameters, passed
            through to get_sparse_attn_mask when no pattern is given.
        scale: softmax scaling factor applied to q @ k^T.
        block_sparse_pattern: optional precomputed dense block mask;
            computed from the layout parameters when None.

    Returns:
        Output tensor with the same shape/dtype as q.
    """
    dtype = q.dtype
    if block_sparse_pattern is None:
        _, block_sparse_pattern, _ = get_sparse_attn_mask(
            q, k.shape[2], block_size=block_size,
            local_blocks=local_blocks, 
            vert_strides=vert_strides, 
            homo_head=homo_head,
            return_dense=False
        )
    
    # k and v must agree in heads and length; query heads must be a
    # multiple of KV heads (GQA/MQA).
    assert k.shape[1] == v.shape[1]
    assert q.shape[1] % k.shape[1] == 0
    assert q.shape[-1] == k.shape[-1] == v.shape[-1]
    assert k.shape[2] == v.shape[2]

    num_query_heads = q.shape[1]
    num_kv_heads = k.shape[1]
    q_k_ratio  = num_query_heads // num_kv_heads

    # CSR layout over the full (k_blocks x k_blocks) pattern.
    grand_layout_crow_indices, grand_layout_col_indices = dense_to_crow_col(block_sparse_pattern)

    out = torch.empty_like(q).contiguous()

    HEAD_DIM = q.shape[-1]
    HEAD_DIM_PADDING = triton.next_power_of_2(HEAD_DIM)
    BLOCK_SIZE_PADDING = triton.next_power_of_2(block_size)
    
    # Kernel assumes block_size itself is a power of two.
    assert block_size == BLOCK_SIZE_PADDING

    # Choose the query tile size: a small 16-row tile for short queries,
    # otherwise one tile per sparsity block.
    BLOCK_M = 16 if q.shape[2] <= 16 else block_size  

    atlasA2 = AtlasA2()
    prop = atlasA2.get_soc_info()
    num_aicore = prop["num_aicore"]
    num_vectorcore = prop["num_vectorcore"]

    # One program per AI core; each core strides over TASKS work items.
    grid_cube = (num_aicore, 1, 1)
    TASKS = triton.cdiv(q.shape[2], BLOCK_M) * q.shape[0] * q.shape[1]
    K_BLOCKS = triton.cdiv(k.shape[2],  block_size)

    # Keep only the crow rows for the query blocks actually present
    # (bottom rows of the pattern when there is a past).
    Q_START_BLOCKS = K_BLOCKS - triton.cdiv(q.shape[2], block_size)
    layout_crow_indices = grand_layout_crow_indices[..., Q_START_BLOCKS:K_BLOCKS+1]
    layout_col_indices = grand_layout_col_indices

    # Homogeneous-head pattern comes back 1D; broadcast it per head so
    # the kernel can index by head uniformly.
    if layout_col_indices.dim() == 1:
        layout_crow_indices = layout_crow_indices[None].expand(q.shape[1] , -1)
        layout_col_indices = layout_col_indices[None].expand(q.shape[1] , -1)

    _fwd_kernel[grid_cube](
        q, k, v, scale,
        layout_crow_indices,
        layout_col_indices,
        layout_crow_indices.stride(0), layout_crow_indices.stride(1),
        layout_col_indices.stride(0), layout_col_indices.stride(1),
        out,
        q.stride(0), q.stride(1), q.stride(2), q.stride(3),
        k.stride(0), k.stride(1), k.stride(2), k.stride(3),
        v.stride(0), v.stride(1), v.stride(2), v.stride(3),
        out.stride(0), out.stride(1), out.stride(2), out.stride(3),
        q.shape[0], q.shape[1], k.shape[2],
        k.shape[2] - q.shape[2], q_k_ratio,
        BLOCK_M=BLOCK_M,
        HEAD_DIM=HEAD_DIM, CORE_NUM=num_aicore, TASKS=TASKS,
        HEAD_DIM_PADDING=HEAD_DIM_PADDING, BLOCK_SIZE=block_size,
    )

    return out