from typing import Optional
import pytest
import torch
import torch_npu
from unified_attn import unified_attention
import test_common
import random
import compare
import numpy as np
import precision_calcu

# Candidate parameter pools for building test combinations.
# Each SEQ_LENS entry is a (query_len, kv_len) pair per sequence.
SEQ_LENS = [(1, 512), (1, 1024), (4, 512), (4, 1024)]
# (num_query_heads, num_kv_heads) pairs — GQA ratios.
NUM_HEADS = [(8, 1), (16, 8), (32, 8), (32, 4)]
HEAD_SIZES = [64, 128]
BLOCK_SIZES = [16, 32, 64]
SLIDING_WINDOW = [None, 512, 1024]
SOFT_CAP = [None, 10.0, 20.0]
NUM_BLOCKS = [32, 64, 128]
DTYPES = [torch.float16, torch.bfloat16]

SEED = 42
DEVICE = "npu"

def ref_paged_attn_old(
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    query_lens: torch.Tensor,
    kv_lens: torch.Tensor,
    block_tables: torch.Tensor,
    scale: float,
    sliding_window: Optional[int] = None,
    soft_cap: Optional[float] = None,
) -> torch.Tensor:
    """CPU reference for paged (block-table) attention, all-float32 variant.

    Older reference kept for comparison: logits, softmax and the value
    matmul are all carried out in float32 before casting back to the
    query dtype.

    Args:
        query: (total_query_tokens, num_query_heads, head_size).
        key_cache / value_cache: (num_blocks, block_size, num_kv_heads, head_size).
        query_lens / kv_lens: per-sequence query / kv token counts.
        block_tables: (num_seqs, max_blocks_per_seq) integer CPU tensor.
        scale: softmax scaling applied to the query.
        sliding_window: if set, keys older than the window are masked out.
        soft_cap: if set (> 0), logits are tanh soft-capped.

    Returns:
        (total_query_tokens, num_query_heads, head_size) tensor in query.dtype.
    """
    dtype = query.dtype
    num_seqs = len(query_lens)
    block_tables = block_tables.numpy()
    _, block_size, num_kv_heads, head_size = key_cache.shape

    outputs: list[torch.Tensor] = []
    start_idx = 0
    for i in range(num_seqs):
        query_len = query_lens[i]
        kv_len = kv_lens[i]
        # BUG FIX: query[start:end] is a view, so the previous in-place
        # `q *= scale` silently mutated the caller's query tensor (a second
        # call would see an already-scaled query). Scale out-of-place.
        q = query[start_idx:start_idx + query_len] * scale

        # Gather this sequence's kv blocks; trim the block-padding tail.
        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_tables[i, :num_kv_blocks]

        k = key_cache[block_indices].view(-1, num_kv_heads, head_size)
        k = k[:kv_len]
        v = value_cache[block_indices].view(-1, num_kv_heads, head_size)
        v = v[:kv_len]

        # GQA: replicate kv heads to match the number of query heads.
        if q.shape[1] != k.shape[1]:
            k = torch.repeat_interleave(k, q.shape[1] // k.shape[1], dim=1)
            v = torch.repeat_interleave(v, q.shape[1] // v.shape[1], dim=1)
        attn = torch.einsum("qhd,khd->hqk", q.to(torch.float32), k.to(torch.float32)).to(torch.float32)
        # Causal mask: query token t attends to the first
        # kv_len - query_len + t + 1 keys.
        empty_mask = torch.ones(query_len, kv_len).cpu()
        mask = torch.triu(empty_mask, diagonal=kv_len - query_len + 1).bool().cpu()
        if sliding_window is not None:
            # Additionally mask keys that fall outside the sliding window.
            sliding_window_mask = torch.triu(empty_mask,
                                             diagonal=kv_len -
                                             (query_len + sliding_window) +
                                             1).bool().logical_not().cpu()
            mask |= sliding_window_mask
        if soft_cap is not None and soft_cap > 0:
            attn = soft_cap * torch.tanh(attn / soft_cap).to(torch.float32)
        attn.masked_fill_(mask, float("-inf"))
        attn = torch.softmax(attn, dim=-1).to(torch.float32)
        out = torch.einsum("hqk,khd->qhd", attn, v.to(torch.float32)).to(dtype)

        outputs.append(out)
        start_idx += query_len

    return torch.cat(outputs, dim=0)

def ref_paged_attn(
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    query_lens: torch.Tensor,
    kv_lens: torch.Tensor,
    block_tables: torch.Tensor,
    scale: float,
    sliding_window: Optional[int] = None,
    soft_cap: Optional[float] = None,
) -> torch.Tensor:
    """CPU reference for paged (block-table) attention.

    Unlike ``ref_paged_attn_old``, scores are computed in the input
    dtype; only the final value matmul casts the probabilities back
    to the value dtype.

    Args:
        query: (total_query_tokens, num_query_heads, head_size).
        key_cache / value_cache: (num_blocks, block_size, num_kv_heads, head_size).
        query_lens / kv_lens: per-sequence query / kv token counts.
        block_tables: (num_seqs, max_blocks_per_seq) integer CPU tensor.
        scale: softmax scaling applied to the query.
        sliding_window: if set, keys older than the window are masked out.
        soft_cap: if set (> 0), logits are tanh soft-capped.

    Returns:
        (total_query_tokens, num_query_heads, head_size) tensor in query.dtype.
    """
    dtype = query.dtype
    num_seqs = len(query_lens)
    block_tables = block_tables.numpy()
    _, block_size, num_kv_heads, head_size = key_cache.shape

    outputs: list[torch.Tensor] = []
    start_idx = 0
    for i in range(num_seqs):
        query_len = query_lens[i]
        kv_len = kv_lens[i]
        # BUG FIX: query[start:end] is a view, so the previous in-place
        # `q *= scale` silently mutated the caller's query tensor (a second
        # call would see an already-scaled query). Scale out-of-place.
        q = query[start_idx:start_idx + query_len] * scale

        # Gather this sequence's kv blocks; trim the block-padding tail.
        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_tables[i, :num_kv_blocks]

        k = key_cache[block_indices].view(-1, num_kv_heads, head_size)
        k = k[:kv_len]
        v = value_cache[block_indices].view(-1, num_kv_heads, head_size)
        v = v[:kv_len]

        # GQA: replicate kv heads to match the number of query heads.
        if q.shape[1] != k.shape[1]:
            k = torch.repeat_interleave(k, q.shape[1] // k.shape[1], dim=1)
            v = torch.repeat_interleave(v, q.shape[1] // v.shape[1], dim=1)
        attn = torch.einsum("qhd,khd->hqk", q, k)
        # Causal mask: query token t attends to the first
        # kv_len - query_len + t + 1 keys.
        empty_mask = torch.ones(query_len, kv_len).cpu()
        mask = torch.triu(empty_mask, diagonal=kv_len - query_len + 1).bool().cpu()
        if sliding_window is not None:
            # Additionally mask keys that fall outside the sliding window.
            sliding_window_mask = torch.triu(empty_mask,
                                             diagonal=kv_len -
                                             (query_len + sliding_window) +
                                             1).bool().logical_not().cpu()
            mask |= sliding_window_mask
        if soft_cap is not None and soft_cap > 0:
            attn = soft_cap * torch.tanh(attn / soft_cap)
        attn.masked_fill_(mask, float("-inf"))
        attn = torch.softmax(attn, dim=-1)
        out = torch.einsum("hqk,khd->qhd", attn.to(v.dtype), v).to(dtype)

        outputs.append(out)
        start_idx += query_len

    return torch.cat(outputs, dim=0)

TEST_CASES = [
    # NOTE(review): many of these shapes currently fail; each failing case
    # needs detailed individual analysis.
    # FIXME: locate the source of the unaligned UB (unified buffer) access.
    # Snapshot as of 2025-11-12.
    # (seq_lens, [num_query_heads, num_kv_heads], head_size, sliding_window, dtype, block_size, soft_cap, num_blocks) = test_case
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 2], 128, None, torch.float16, 16, 20.0, 32],
    [[(1, 1024), (4, 1024)], [8, 1], 128, None, torch.float16, 16, None, 32],
    [[(1, 1024), (4, 1024)], [32, 8], 64, 1024, torch.float16, 64, 20.0, 128],
    [[(1, 1024), (4, 1024)], [16, 4], 64, 512, torch.bfloat16, 16, None, 128],
    [[(1, 512), (4, 512)], [16, 16], 128, None, torch.float16, 32, None, 32],
    [[(1, 512), (4, 512)], [8, 4], 128, 512, torch.bfloat16, 16, 20.0, 64],
    [[(1, 1024), (4, 1024)], [8, 8], 128, None, torch.bfloat16, 64, 20.0, 64],
    [[(1, 1024), (4, 1024)], [8, 2], 64, None, torch.float16, 32, None, 32],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 2], 128, 512, torch.bfloat16, 16, 10.0, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 4], 128, 1024, torch.float16, 64, 20.0, 32],
    [[(1, 1024), (4, 1024)], [32, 16], 64, None, torch.bfloat16, 32, 10.0, 128],
    [[(1, 1024), (4, 1024)], [8, 1], 128, None, torch.float16, 16, 10.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 4], 64, 1024, torch.bfloat16, 16, 20.0, 64],
    [[(1, 512), (4, 512)], [32, 32], 128, None, torch.bfloat16, 16, None, 128],
    [[(1, 1024), (4, 1024)], [32, 2], 128, 1024, torch.bfloat16, 64, 10.0, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [8, 4], 128, None, torch.float16, 16, None, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 1], 128, 1024, torch.float16, 32, 10.0, 128],
    [[(1, 512), (4, 512)], [8, 8], 128, 1024, torch.float16, 64, 20.0, 32],
    [[(1, 1024), (4, 1024)], [16, 8], 128, None, torch.bfloat16, 32, None, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 16], 128, 1024, torch.float16, 64, None, 128],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 4], 64, None, torch.bfloat16, 16, 20.0, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 2], 128, 512, torch.float16, 16, 10.0, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [8, 2], 64, 1024, torch.float16, 16, 20.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 16], 64, None, torch.bfloat16, 64, None, 64],
    [[(1, 1024), (4, 1024)], [32, 8], 128, None, torch.float16, 64, 10.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [16, 2], 128, None, torch.float16, 16, None, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 4], 64, 1024, torch.float16, 16, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 1], 64, None, torch.bfloat16, 16, 20.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 16], 128, None, torch.float16, 64, 20.0, 128],
    [[(1, 512), (4, 512)], [8, 4], 128, 512, torch.float16, 16, None, 128],
    [[(1, 512), (4, 512)], [16, 1], 128, 512, torch.bfloat16, 64, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 8], 128, 1024, torch.bfloat16, 16, None, 32],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 2], 128, None, torch.bfloat16, 16, 10.0, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [8, 2], 128, 1024, torch.float16, 16, 20.0, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 4], 64, None, torch.bfloat16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [8, 8], 64, 512, torch.float16, 32, 10.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [16, 4], 128, None, torch.float16, 32, None, 32],
    # NOTE(review): very large query/kv head ratio in this case (16:1).
    [[(1, 1024), (4, 1024)], [16, 1], 64, 1024, torch.bfloat16, 16, None, 128],
    [[(1, 512), (4, 512)], [32, 16], 64, None, torch.float16, 16, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 8], 64, 512, torch.float16, 64, None, 128],
    [[(1, 1024), (4, 1024)], [8, 2], 64, 512, torch.bfloat16, 32, None, 128],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 1], 128, 512, torch.float16, 64, 20.0, 64],
    [[(1, 512), (4, 512)], [16, 8], 64, None, torch.bfloat16, 64, 20.0, 32],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 4], 64, 1024, torch.bfloat16, 16, 10.0, 32],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 2], 128, None, torch.bfloat16, 64, 20.0, 64],
    [[(1, 1024), (4, 1024)], [32, 32], 64, 1024, torch.bfloat16, 64, None, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 4], 64, 1024, torch.float16, 32, 10.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 16], 128, None, torch.bfloat16, 64, 10.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 2], 128, 512, torch.float16, 16, 10.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 8], 128, 1024, torch.bfloat16, 64, None, 32],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 2], 64, 1024, torch.float16, 32, 20.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 16], 64, None, torch.bfloat16, 32, None, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 8], 64, 1024, torch.float16, 32, 20.0, 64],
    [[(1, 1024), (4, 1024)], [32, 4], 64, None, torch.float16, 16, 10.0, 32],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 32], 128, 512, torch.float16, 32, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 4], 64, 512, torch.float16, 16, 10.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 2], 64, None, torch.float16, 32, 10.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 8], 128, 1024, torch.bfloat16, 64, 20.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 1], 128, None, torch.bfloat16, 16, None, 128],
    [[(1, 512), (4, 512)], [16, 2], 128, 512, torch.float16, 32, 20.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 4], 128, None, torch.float16, 32, 10.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 1], 128, 1024, torch.float16, 64, 10.0, 128],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 8], 128, 512, torch.bfloat16, 64, 10.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [8, 8], 128, 1024, torch.bfloat16, 64, 20.0, 32],
    [[(1, 1024), (4, 1024)], [32, 16], 128, 512, torch.float16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [32, 4], 128, 512, torch.bfloat16, 32, 20.0, 32],
    [[(1, 1024), (4, 1024)], [16, 2], 64, 1024, torch.float16, 32, 20.0, 128],
    [[(1, 1024), (4, 1024)], [16, 16], 64, None, torch.bfloat16, 16, None, 32],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 4], 64, 512, torch.float16, 32, 10.0, 128],
    [[(1, 1024), (4, 1024)], [8, 2], 128, 512, torch.bfloat16, 16, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 8], 128, None, torch.float16, 64, 20.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 2], 64, None, torch.bfloat16, 16, 10.0, 128],
    [[(1, 1024), (4, 1024)], [32, 8], 128, 512, torch.bfloat16, 64, 10.0, 32],
    [[(1, 512), (4, 512)], [16, 4], 128, None, torch.bfloat16, 64, 10.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 8], 128, None, torch.bfloat16, 16, 10.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 4], 64, None, torch.float16, 16, 10.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 16], 64, None, torch.bfloat16, 32, 10.0, 128],
    [[(1, 512), (4, 512)], [16, 2], 64, None, torch.bfloat16, 32, 20.0, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [32, 8], 128, 512, torch.float16, 32, 10.0, 64],
    [[(1, 512), (4, 512)], [32, 4], 64, 512, torch.float16, 32, 10.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 1], 128, 1024, torch.bfloat16, 64, None, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 2], 64, 512, torch.float16, 16, 20.0, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 4], 64, None, torch.bfloat16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [16, 8], 128, 512, torch.bfloat16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [8, 2], 64, 512, torch.float16, 64, None, 128],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [8, 4], 64, None, torch.float16, 64, None, 32],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [16, 2], 64, 1024, torch.float16, 32, 20.0, 64],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [8, 1], 64, None, torch.bfloat16, 16, 20.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 16], 128, 512, torch.float16, 16, 20.0, 128],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [8, 4], 128, 1024, torch.float16, 64, None, 64],
    [[(1, 1024), (4, 1024)], [16, 4], 128, None, torch.bfloat16, 16, 10.0, 64],
    [[(1, 512), (1, 1024), (4, 512), (4, 1024)], [16, 8], 128, 1024, torch.bfloat16, 64, None, 64],
    [[(1, 512), (1, 512), (4, 512), (4, 512)], [32, 4], 128, 1024, torch.bfloat16, 32, 10.0, 128],
    [[(1, 1024), (4, 1024)], [16, 2], 128, None, torch.float16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [32, 8], 128, 512, torch.float16, 32, 10.0, 32],
    [[(1, 512), (4, 512)], [8, 4], 128, 512, torch.bfloat16, 32, 20.0, 128],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 32], 64, 1024, torch.float16, 16, None, 128],
    [[(1, 512), (4, 512)], [16, 8], 64, 1024, torch.bfloat16, 64, 20.0, 64],
    [[(1, 512), (4, 512)], [8, 2], 64, 512, torch.float16, 32, 20.0, 32],
    [[(1, 1024), (1, 1024), (4, 1024), (4, 1024)], [32, 8], 128, 512, torch.bfloat16, 32, 20.0, 128],
]


# Known issue: unaligned VEC (vector unit) accesses for some shapes.

@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_triton_unified_attn(
    test_case
) -> None:
    """Run the NPU unified_attention kernel and compare against the CPU reference.

    Each ``test_case`` is
    (seq_lens, [num_query_heads, num_kv_heads], head_size, sliding_window,
     dtype, block_size, soft_cap, num_blocks).
    """
    (seq_lens, num_heads, head_size, sliding_window,
     dtype, block_size, soft_cap, num_blocks) = test_case
    torch.set_default_device(DEVICE)
    torch.npu.set_device(1)  # NOTE(review): hard-coded device index 1 — confirm intended
    # Seed every RNG so tensor generation is reproducible across runs.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    num_seqs = len(seq_lens)
    query_lens = [x[0] for x in seq_lens]
    kv_lens = [x[1] for x in seq_lens]
    num_query_heads, num_kv_heads = num_heads
    assert num_query_heads % num_kv_heads == 0
    max_query_len = max(query_lens)
    max_kv_len = max(kv_lens)
    # unified_attention takes a (left, right) window; (-1, -1) disables it.
    window_size = ((sliding_window - 1, 0) if sliding_window is not None else
                   (-1, -1))
    scale = head_size**-0.5

    # Map the torch dtype to the string tag used by test_common.
    sigtype_dict = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    sigtype = sigtype_dict[dtype]

    query = test_common.generate_tensor((sum(query_lens), num_query_heads, head_size), sigtype)
    key_cache = test_common.generate_tensor((num_blocks, block_size, num_kv_heads, head_size), sigtype)
    value_cache = test_common.generate_tensor((num_blocks, block_size, num_kv_heads, head_size), sigtype)
    # Cumulative query offsets: [0, len0, len0+len1, ...].
    cu_query_lens = torch.tensor([0] + query_lens,
                                 dtype=torch.int32).cumsum(dim=0,
                                                           dtype=torch.int32)

    query_lens = torch.tensor(query_lens, dtype=torch.int32)
    kv_lens = torch.tensor(kv_lens, dtype=torch.int32)

    # Random block table; block ids may repeat across sequences.
    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    block_tables = torch.randint(0,
                                 num_blocks,
                                 (num_seqs, max_num_blocks_per_seq),
                                 dtype=torch.int32)

    output = torch.empty_like(query, dtype=dtype)

    unified_attention(
        q=query,
        k=key_cache,
        v=value_cache,
        out=output,
        cu_seqlens_q=cu_query_lens,
        seqused_k=kv_lens,
        max_seqlen_q=max_query_len,
        max_seqlen_k=max_kv_len,
        softmax_scale=scale,
        window_size=window_size,
        block_table=block_tables,
        softcap=soft_cap if soft_cap is not None else 0,
    )

    # Compute the reference on CPU with the same inputs.
    ref_output = ref_paged_attn(
        query=query.to("cpu"),
        key_cache=key_cache.to("cpu"),
        value_cache=value_cache.to("cpu"),
        query_lens=query_lens.to("cpu"),
        kv_lens=kv_lens.to("cpu"),
        block_tables=block_tables.to("cpu"),
        scale=scale,
        sliding_window=sliding_window,
        soft_cap=soft_cap,
    )

    passed = precision_calcu.compare_cv(ref_output.cpu(), output.cpu(), output.cpu())
    assert passed