import random
from typing import Optional
import test_common
import pytest
import torch
import numpy as np

from block_sparse_attn import block_sparse_attn

# ========= TESTING =======
# Smaller debug configuration (kept for quick local runs):
# NUM_PREFILL_SEQS = [3]
# NUM_HEADS = [(2, 2)]
# HEAD_SIZES = [8, 16]
# LOCAL_BLOCKS = [8, 16]
# VERT_STRIDES = [8, 16]
# BLOCK_SIZES = [8, 16]
# HOMO_HEADS = [True, False]
# DTYPES = [torch.float16, torch.bfloat16]
# NPU_DEVICES = ['npu']
# MAX_SEQ_LEN = [128]  # set according to available device memory
# NOTE(review): the per-dimension lists below are currently unused —
# the matching @pytest.mark.parametrize decorators are commented out and
# the explicit TEST_CASES list drives the test instead.
NUM_PREFILL_SEQS = [1, 4, 8]
NUM_HEADS = [(8, 8), (16, 8), (32, 8), (32, 32)]
HEAD_SIZES = [64, 128, 256]
LOCAL_BLOCKS = [4, 8, 16]
VERT_STRIDES = [4, 8, 16]
BLOCK_SIZES = [16, 32, 64]
HOMO_HEADS = [True, False]
DTYPES = [torch.float16, torch.bfloat16]
# NPU_DEVICES = ['npu']
device = "npu"  # Ascend NPU device string; used as the default device below
MAX_SEQ_LEN = [512, 1024, 2048] # set these values according to available device memory
# ========= TESTING =======

def ref_masked_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Dense reference scaled-dot-product attention.

    `query` is (num_query_tokens, heads, head_dim); `key`/`value` are
    (num_kv_tokens, heads, head_dim).  The optional `attn_mask` is added
    to the (heads, q, k) score tensor (it must broadcast to that shape)
    before the softmax.  Returns a tensor shaped like `query`, in
    `value`'s dtype.
    """
    # Scores are computed in float32 so the softmax stays numerically
    # stable even for fp16/bf16 inputs.
    scores = torch.einsum("qhd,khd->hqk", query, key).float() * scale
    if attn_mask is not None:
        scores = scores + attn_mask.float()
    probs = torch.softmax(scores, dim=-1).to(value.dtype)
    return torch.einsum("hqk,khd->qhd", probs, value)

def ref_multi_query_kv_attention(
    cu_seq_lens: list[int],
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Causal reference attention over a packed variable-length batch.

    `cu_seq_lens` holds cumulative sequence offsets with a leading 0, so
    sequence i occupies rows [cu_seq_lens[i], cu_seq_lens[i + 1]) of the
    token-packed `query`/`key`/`value` tensors.  Each sequence attends
    only to itself under a causal mask; the per-sequence outputs are
    concatenated back along the token dimension.
    """
    per_seq_outputs = []
    for start, end in zip(cu_seq_lens[:-1], cu_seq_lens[1:]):
        seq_len = end - start
        # Causal mask: strictly-upper-triangular (future) positions get
        # the dtype minimum so softmax assigns them ~zero probability.
        causal_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=dtype),
                                 diagonal=1)
        causal_mask = causal_mask * torch.finfo(dtype).min
        causal_mask = causal_mask.to(dtype=dtype)

        per_seq_outputs.append(
            ref_masked_attention(
                query[start:end],
                key[start:end],
                value[start:end],
                scale,
                attn_mask=causal_mask,
            )
        )
    return torch.cat(per_seq_outputs, dim=0)

TEST_CASES = [
    # Each case is:
    # [max_seq_len, num_seqs, (num_query_heads, num_kv_heads), head_size,
    #  local_blocks, vert_stride, block_size, homo_heads, dtype]
    # — the field order matches the unpacking at the top of
    # test_varlen_blocksparse_attention_prefill.
    # Basic configuration group (512 sequence length)
    # [512, 1, (8, 8), 64, 4, 4, 16, True, torch.float16],
    # [512, 1, (8, 8), 64, 4, 4, 32, False, torch.bfloat16],
    # [512, 1, (16, 8), 64, 4, 8, 16, True, torch.float16],
    # [512, 1, (16, 8), 64, 8, 4, 32, False, torch.bfloat16],
    # [512, 1, (32, 8), 64, 4, 8, 64, True, torch.float16],
    # [512, 1, (32, 16), 64, 8, 8, 32, False, torch.bfloat16],
    # [512, 1, (32, 32), 64, 16, 4, 16, True, torch.float16],
    # [512, 1, (16, 16), 64, 4, 16, 64, False, torch.bfloat16],
    # [512, 4, (8, 8), 64, 4, 4, 16, True, torch.float16],
    # [512, 4, (16, 8), 64, 8, 8, 32, False, torch.bfloat16],
    # [512, 4, (32, 8), 64, 4, 16, 64, True, torch.float16],
    # [512, 4, (32, 32), 64, 16, 4, 32, False, torch.bfloat16],
    # [512, 8, (8, 8), 64, 8, 8, 16, True, torch.float16],
    # [512, 8, (16, 16), 64, 4, 4, 64, False, torch.bfloat16],
    # [512, 8, (32, 8), 64, 8, 16, 32, True, torch.float16],
    # [512, 8, (32, 16), 64, 16, 8, 16, False, torch.bfloat16],

    # # 128 head-dim extension group
    # [512, 1, (8, 8), 128, 4, 4, 16, True, torch.float16],
    # =====
    [512, 1, (16, 8), 128, 8, 8, 32, False, torch.bfloat16],
    [512, 1, (32, 16), 128, 4, 16, 64, True, torch.float16],
    # =====
    # [512, 1, (32, 32), 128, 16, 4, 32, False, torch.bfloat16],
    # [512, 4, (8, 8), 128, 8, 8, 16, True, torch.float16],
    # [512, 4, (16, 16), 128, 4, 4, 64, False, torch.bfloat16],
    # [512, 4, (32, 8), 128, 8, 16, 32, True, torch.float16],
    # [512, 8, (16, 8), 128, 16, 8, 16, False, torch.bfloat16],
    # [512, 8, (32, 16), 128, 4, 8, 64, True, torch.float16],
    # [512, 8, (32, 32), 128, 8, 4, 32, False, torch.bfloat16],

    # 256 head-dim group
    # [512, 1, (8, 8), 256, 4, 8, 16, True, torch.float16],
    # [512, 1, (16, 16), 256, 8, 4, 32, False, torch.bfloat16],
    # ========
    [512, 4, (32, 8), 256, 4, 16, 64, True, torch.float16],
    # ========
    # [512, 4, (32, 32), 256, 16, 8, 32, False, torch.bfloat16],
    # ========
    [512, 8, (16, 8), 256, 8, 4, 16, True, torch.float16],


    # 1024 sequence-length group
    [1024, 1, (8, 8), 64, 4, 4, 32, True, torch.float16],
    # ========
    # [1024, 1, (16, 8), 64, 8, 8, 64, False, torch.bfloat16],
    # [1024, 1, (32, 16), 64, 4, 16, 32, True, torch.float16],
    # [1024, 1, (32, 32), 64, 16, 4, 64, False, torch.bfloat16],
    # [1024, 4, (8, 8), 64, 8, 8, 32, True, torch.float16],
    # [1024, 4, (16, 16), 64, 4, 4, 64, False, torch.bfloat16],
    # [1024, 4, (32, 8), 64, 8, 16, 32, True, torch.float16],
    # [1024, 4, (32, 16), 64, 16, 8, 64, False, torch.bfloat16],
    # [1024, 8, (8, 8), 64, 4, 8, 32, True, torch.float16],
    # [1024, 8, (16, 8), 64, 8, 4, 64, False, torch.bfloat16],
    # [1024, 8, (32, 32), 64, 4, 16, 32, True, torch.float16],

    # 1024 sequence length + 128 head dim
    # ========
    [1024, 1, (8, 8), 128, 4, 8, 32, True, torch.float16],
    # =======
    # [1024, 1, (16, 16), 128, 8, 4, 64, False, torch.bfloat16],
    # ======
    [1024, 4, (32, 8), 128, 4, 16, 32, True, torch.float16],
    # ======
    # [1024, 4, (32, 32), 128, 16, 8, 64, False, torch.bfloat16],
    # [1024, 8, (16, 8), 128, 8, 4, 32, True, torch.float16],
    # [1024, 8, (32, 16), 128, 4, 8, 64, False, torch.bfloat16],

    # # 1024 sequence length + 256 head dim
    # [1024, 1, (8, 8), 256, 4, 16, 32, True, torch.float16],
    # [1024, 1, (16, 16), 256, 8, 8, 64, False, torch.bfloat16],
    # [1024, 4, (32, 8), 256, 16, 4, 32, True, torch.float16],
    # [1024, 8, (32, 32), 256, 4, 8, 64, False, torch.bfloat16],

    # # 2048 sequence-length group
    # [2048, 1, (8, 8), 64, 4, 4, 64, True, torch.float16],
    # [2048, 1, (16, 8), 64, 8, 8, 32, False, torch.bfloat16],
    # [2048, 1, (32, 16), 64, 4, 16, 64, True, torch.float16],
    # [2048, 1, (32, 32), 64, 16, 4, 32, False, torch.bfloat16],
    # [2048, 4, (8, 8), 64, 8, 8, 64, True, torch.float16],
    # [2048, 4, (16, 16), 64, 4, 4, 32, False, torch.bfloat16],
    # [2048, 4, (32, 8), 64, 8, 16, 64, True, torch.float16],
    # [2048, 8, (16, 8), 64, 16, 8, 32, False, torch.bfloat16],
    # [2048, 8, (32, 16), 64, 4, 8, 64, True, torch.float16],

    # # 2048 sequence length + 128 head dim
    # [2048, 1, (8, 8), 128, 4, 8, 64, True, torch.float16],
    # [2048, 1, (16, 16), 128, 8, 4, 32, False, torch.bfloat16],
    # [2048, 4, (32, 8), 128, 4, 16, 64, True, torch.float16],
    # [2048, 4, (32, 32), 128, 16, 8, 32, False, torch.bfloat16],
    # [2048, 8, (16, 8), 128, 8, 4, 64, True, torch.float16],

    # # 2048 sequence length + 256 head dim
    # [2048, 1, (8, 8), 256, 4, 16, 64, True, torch.float16],
    # [2048, 1, (16, 16), 256, 8, 8, 32, False, torch.bfloat16],
    # [2048, 4, (32, 8), 256, 16, 4, 64, True, torch.float16],

    # # Multi-sequence combination group
    # [512, 4, (8, 8), 128, 4, 8, 32, False, torch.float16],
    # [512, 8, (16, 8), 256, 8, 16, 64, True, torch.bfloat16],
    # [1024, 4, (32, 16), 128, 4, 4, 32, False, torch.float16],
    # [1024, 8, (32, 32), 256, 8, 8, 64, True, torch.bfloat16],
    # [2048, 4, (16, 8), 128, 16, 16, 32, False, torch.float16],
    # [2048, 8, (8, 8), 256, 4, 8, 64, True, torch.bfloat16],

    # # Heterogeneous-head (homo_heads=False) reinforcement group
    # [512, 1, (16, 8), 64, 4, 8, 16, False, torch.float16],
    # [512, 4, (32, 8), 128, 8, 16, 32, False, torch.bfloat16],
    # [1024, 1, (32, 16), 256, 16, 4, 64, False, torch.float16],
    # [1024, 8, (16, 8), 64, 4, 4, 32, False, torch.bfloat16],
    # [2048, 1, (32, 8), 128, 8, 8, 64, False, torch.float16],
    # [2048, 4, (8, 8), 256, 16, 16, 32, False, torch.bfloat16],

    # # Large local-blocks configuration group
    # [512, 1, (32, 32), 64, 16, 8, 16, True, torch.float16],
    # [1024, 4, (16, 16), 128, 16, 4, 32, True, torch.bfloat16],
    # [2048, 8, (8, 8), 256, 16, 8, 64, True, torch.float16],

    # # Large vertical-stride configuration group
    # [512, 8, (32, 8), 64, 4, 16, 32, True, torch.bfloat16],
    # ========
    [1024, 1, (16, 16), 128, 8, 16, 64, True, torch.float16],
    # ========
    # [2048, 4, (32, 32), 256, 4, 16, 32, True, torch.bfloat16],

    # # Mixed extreme-parameter group
    # [2048, 8, (32, 8), 256, 16, 16, 64, False, torch.float16],
    # [512, 1, (8, 8), 256, 4, 4, 64, False, torch.bfloat16],
    # [1024, 4, (16, 8), 64, 8, 8, 32, False, torch.float16],
    # ======
    [2048, 1, (32, 16), 128, 16, 4, 64, False, torch.bfloat16],
    # ======
    # [512, 8, (32, 32), 64, 4, 8, 16, False, torch.float16],
    # [1024, 1, (8, 8), 128, 8, 16, 32, False, torch.bfloat16],
    # [2048, 4, (16, 16), 256, 4, 4, 64, False, torch.float16],
    # [512, 4, (32, 8), 64, 16, 8, 32, False, torch.bfloat16],
    # [1024, 8, (16, 8), 256, 4, 8, 64, False, torch.float16],
    # [2048, 8, (8, 8), 128, 8, 4, 32, False, torch.bfloat16],

    # # Newly added supplementary group (3)
    # [512, 2, (8, 16), 64, 4, 8, 32, True, torch.float16],
    # [512, 2, (16, 32), 64, 8, 4, 16, False, torch.bfloat16],
    # [512, 6, (32, 16), 128, 4, 16, 64, True, torch.float16],
]

# @pytest.mark.parametrize("max_seq_len", MAX_SEQ_LEN)
# @pytest.mark.parametrize("num_seqs", NUM_PREFILL_SEQS)
# @pytest.mark.parametrize("num_heads", NUM_HEADS)
# @pytest.mark.parametrize("head_size", HEAD_SIZES)
# @pytest.mark.parametrize("local_blocks", LOCAL_BLOCKS)
# @pytest.mark.parametrize("vert_stride", VERT_STRIDES)
# @pytest.mark.parametrize("block_size", BLOCK_SIZES)
# @pytest.mark.parametrize("homo_heads", HOMO_HEADS)
# @pytest.mark.parametrize("dtype", DTYPES)
# @pytest.mark.parametrize("device", NPU_DEVICES)
@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_varlen_blocksparse_attention_prefill(
    # max_seq_len: int,
    # num_seqs: int,
    # num_heads: tuple[int, int],
    # head_size: int,
    # local_blocks: int,
    # vert_stride: int,
    # block_size: int,
    # homo_heads: bool,
    # dtype: torch.dtype,
    # device: str,
    test_case: list
) -> None:
    """Compare block_sparse_attn against a dense causal reference on a
    packed batch of variable-length (prefill) sequences.

    Each TEST_CASES entry bundles one full parameter combination; the
    unpacking on the first line below documents the field order.
    """
    (max_seq_len, num_seqs, num_heads, head_size, local_blocks, vert_stride, block_size, homo_heads, dtype) = test_case
    # current_platform.seed_everything(seed)
    torch.set_default_device(device)
    # Cap the sequence length: very long sequences can OOM the dense
    # reference implementation, so a smaller bound is used here.
    # Fixed seeds keep the sampled lengths and tensor contents
    # reproducible across runs.
    torch.manual_seed(42)
    np.random.seed(42) 
    random.seed(42)
    max_len = min(max_seq_len, 4096)
    # num_seqs distinct per-sequence lengths drawn from [1, max_len).
    seq_lens = random.sample(range(1, max_len), num_seqs)
    # Cumulative offsets with a leading 0: sequence i occupies rows
    # [cu_seq_lens[i], cu_seq_lens[i + 1]) of the packed token dimension.
    cu_seq_lens = torch.cumsum(torch.tensor([0] + seq_lens), dim=0)
    num_tokens = sum(seq_lens)

    scale = float(1.0 / (head_size**0.5))
    num_query_heads, num_kv_heads = num_heads
    assert num_query_heads % num_kv_heads == 0
    num_queries_per_kv = num_query_heads // num_kv_heads

    # Single packed buffer holding Q, K and V along the head dimension,
    # split below into (num_tokens, heads, head_size) views.
    qkv = torch.empty(num_tokens,
                      num_query_heads + 2 * num_kv_heads,
                      head_size,
                      dtype=dtype)
    qkv.uniform_(-scale, scale)
    query, key, value = qkv.split(
        [num_query_heads, num_kv_heads, num_kv_heads], dim=1)

    output = torch.empty_like(query, device=device)

    # NOTE(review): block_sparse_attn appears to write its result into
    # `output` in place (its return value is unused here) — confirm
    # against the kernel's API.
    block_sparse_attn(
        query, key, value, output, cu_seqlens_k=cu_seq_lens.to(device), max_seqlen=max_len, local_blocks=local_blocks,
        vert_stride=vert_stride, block_size=block_size, sm_scale=scale, data_type=dtype,
        homo_head=homo_heads, device=device
    )

    # GQA/MQA: replicate each KV head so the dense reference sees one KV
    # head per query head.
    if num_queries_per_kv > 1:
        key = torch.repeat_interleave(key, num_queries_per_kv, dim=1)
        value = torch.repeat_interleave(value, num_queries_per_kv, dim=1)

    ref_output = ref_multi_query_kv_attention(
        cu_seq_lens.tolist(),
        query,
        key,
        value,
        scale,
        dtype,
    )
    # torch.testing.assert_close(output, ref_output, atol=1.5e-1, rtol=1.5e-1)
    sigtype_dict = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    sigtype = sigtype_dict[dtype]
    # Presumably validate_cmp applies dtype-specific tolerances — see
    # test_common for the exact comparison criteria.
    test_common.validate_cmp(sigtype, output, ref_output)