from typing import Optional
import pytest
import torch
import torch_npu
from unified_attn import unified_attn
import test_common
import random
import compare
import numpy as np
import math

DEVICE = "npu"
SEED = 20

# Minimal single-case list kept for quick local debugging; to use it, comment
# out the full TEST_CASES definition below and uncomment these lines:
# TEST_CASES = [
#     [2, 1, (512, 1024), [8, 4], 64, 64, 32, 128, 20.0, torch.float16]
# ]

# Parametrization table for test_unified_attn; sliding_window / soft_cap may be
# None, which disables the corresponding feature in both kernel and reference.
TEST_CASES = [
    # Format: [num_seqs, q_len, (kv_lens...), [num_query_heads, num_kv_heads], head_size, num_blocks, block_size, sliding_window, soft_cap, dtype]
    [1, 64, (512,), [8, 4], 64, 64, 32, 128, None, torch.float16],
    [1, 128, (1024,), [16, 4], 64, 64, 64, 128, 10.0, torch.bfloat16],
    [2, 128, (512, 1024), [8, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [3, 128, (512, 1024, 2048), [4, 4], 64, 64, 32, 128, None, torch.bfloat16],
    [2, 1, (512, 2048), [12, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [3, 64, (512, 1024, 2048), [8, 8], 64, 64, 32, 128, 20.0, torch.bfloat16],
    [2, 64, (128, 1024), [16, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [2, 64, (512, 1024), [16, 16], 64, 64, 32, 128, 20.0, torch.bfloat16],
    [1, 256, (2048,), [8, 4], 32, 64, 64, None, None, torch.float16],
    [2, 512, (4096, 3072), [16, 8], 64, 128, 128, 512, 10.0, torch.bfloat16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 32, 256, 64, 128, None, torch.float16],
    [1, 64, (512,), [4, 4], 128, 64, 64, None, 15.0, torch.bfloat16],
    [3, 256, (2048, 3072, 4096), [16, 8], 32, 128, 128, 256, None, torch.float16],
    [2, 1024, (4096, 3072), [32, 16], 64, 256, 64, 1024, 20.0, torch.bfloat16],
    [4, 128, (1024, 2048, 1536, 2560), [16, 4], 32, 64, 128, None, None, torch.float16],
    [1, 512, (2048,), [32, 8], 128, 128, 64, 512, 5.0, torch.bfloat16],
    [3, 64, (512, 1024, 1536), [8, 4], 32, 256, 64, None, None, torch.float16],
    [2, 256, (2048, 3072), [16, 8], 64, 64, 128, 256, 25.0, torch.bfloat16],
    [1, 1024, (4096,), [32, 16], 32, 128, 64, None, None, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 8], 64, 256, 128, 128, 10.0, torch.bfloat16],
    [3, 64, (512, 1024, 1536), [16, 4], 32, 64, 64, None, None, torch.float16],
    [1, 256, (2048,), [8, 4], 128, 128, 64, 256, 30.0, torch.bfloat16],
    [2, 512, (2048, 4096), [32, 16], 32, 256, 128, None, None, torch.float16],
    [4, 1024, (4096, 3072, 2048, 1536), [16, 8], 64, 64, 64, 1024, 15.0, torch.bfloat16],
    [1, 64, (512,), [8, 4], 32, 128, 128, None, None, torch.float16],
    [3, 256, (2048, 1024, 3072), [32, 8], 64, 256, 64, 256, None, torch.bfloat16],
    [2, 512, (3072, 4096), [16, 4], 32, 64, 64, None, 20.0, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 128, 128, 64, 128, None, torch.bfloat16],
    [1, 256, (2048, 3072), [16, 8], 32, 256, 128, None, 25.0, torch.float16],
    [3, 1024, (4096, 3072, 2048), [32, 16], 64, 64, 64, 1024, None, torch.bfloat16],
    [2, 64, (512, 1024), [8, 4], 32, 128, 128, None, None, torch.float16],
    [4, 256, (2048, 4096, 1536, 3072), [16, 8], 64, 256, 64, 256, 10.0, torch.bfloat16],
    [1, 512, (3072, 4096), [32, 16], 32, 64, 64, None, None, torch.float16],
    [3, 128, (1024, 2048, 1536), [16, 4], 128, 128, 64, 128, 30.0, torch.bfloat16],
    [2, 256, (2048, 1024), [8, 4], 32, 256, 128, None, None, torch.float16],
    [4, 1024, (4096, 3072, 2048, 1536), [32, 8], 64, 64, 64, 1024, 15.0, torch.bfloat16],
    [1, 64, (512, 1024, 1536), [16, 8], 32, 128, 64, None, None, torch.float16],
    [3, 512, (2048, 3072, 4096), [32, 16], 64, 256, 128, 512, 20.0, torch.bfloat16],
    [2, 128, (1024, 2048), [8, 4], 32, 64, 64, None, None, torch.float16],
    [4, 256, (2048, 3072, 1536, 4096), [16, 8], 128, 128, 64, 256, None, torch.bfloat16],
    [1, 1024, (4096, 3072), [32, 16], 32, 256, 128, None, 25.0, torch.float16],
    [2, 64, (512, 1024), [8, 4], 64, 64, 64, 64, None, torch.bfloat16],
    [2, 512, (2048, 4096), [16, 8], 32, 128, 128, None, 10.0, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 64, 256, 64, 128, None, torch.bfloat16],
    [1, 256, (2048, 1536), [16, 4], 32, 64, 128, None, 30.0, torch.float16],
    [2, 1024, (4096, 3072), [32, 8], 64, 128, 64, 1024, None, torch.bfloat16],
    [2, 64, (512, 1024, 1536), [8, 4], 32, 256, 64, None, 5.0, torch.float16],
    [4, 256, (2048, 3072, 1536, 4096), [16, 8], 32, 64, 128, 256, None, torch.bfloat16],
    [1, 512, (2048, 4096), [32, 16], 64, 128, 64, None, 15.0, torch.float16],
    [3, 128, (1024, 2048, 1536), [16, 4], 32, 256, 128, 128, None, torch.bfloat16],
    [1, 1024, (4096,), [32, 8], 32, 64, 64, None, 20.0, torch.float16],
    [4, 64, (512, 1024, 1536, 2560), [8, 4], 64, 128, 128, 64, None, torch.bfloat16],
    [1, 256, (2048, 3072), [16, 8], 32, 256, 64, None, None, torch.float16],
    [3, 512, (2048, 4096, 3072), [32, 16], 128, 64, 64, 512, 25.0, torch.bfloat16],
    [2, 128, (1024, 2048, 1536), [8, 4], 32, 128, 128, None, None, torch.float16],
    [4, 1024, (4096, 3072, 2048, 1536), [32, 8], 64, 256, 64, 1024, 10.0, torch.bfloat16],
    [1, 64, (512, 1024), [16, 8], 32, 64, 128, None, None, torch.float16],
    [3, 256, (2048, 1536, 3072), [8, 4], 64, 128, 64, 256, 30.0, torch.bfloat16],
    [2, 512, (2048, 4096), [16, 8], 32, 256, 128, None, None, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 32, 64, 64, 128, 15.0, torch.bfloat16],
    [1, 1024, (4096, 3072), [32, 8], 64, 128, 128, None, None, torch.float16],
    [3, 64, (512, 1024, 1536), [8, 4], 32, 256, 64, 64, None, torch.bfloat16],
    [2, 256, (2048, 3072), [16, 8], 128, 64, 64, None, 20.0, torch.float16],
    [4, 512, (2048, 4096, 3072, 1536), [32, 16], 32, 128, 128, 512, None, torch.bfloat16],
    [1, 128, (1024, 2048), [16, 4], 32, 256, 128, None, 5.0, torch.float16],
    [1, 1024, (4096,), [32, 8], 64, 64, 64, 1024, None, torch.bfloat16],
    [2, 64, (512, 1024), [8, 4], 32, 128, 128, None, 25.0, torch.float16],
    [4, 256, (2048, 3072, 1536, 4096), [16, 8], 64, 256, 64, 256, None, torch.bfloat16],
    [1, 512, (2048, 4096), [32, 16], 32, 64, 128, None, 10.0, torch.float16],
    [3, 128, (1024, 2048, 1536), [16, 4], 64, 128, 64, 128, None, torch.bfloat16],
    [2, 1024, (4096, 3072), [32, 8], 32, 256, 64, None, 30.0, torch.float16],
    [4, 64, (512, 1024, 1536, 2560), [8, 4], 32, 64, 128, 64, None, torch.bfloat16],
    [1, 256, (2048, 1536), [16, 8], 64, 128, 128, None, 15.0, torch.float16],
    [2, 512, (2048, 4096), [32, 16], 32, 256, 64, 512, None, torch.bfloat16],
    [2, 128, (1024, 2048), [8, 4], 32, 64, 64, None, None, torch.float16],
    [4, 1024, (4096, 3072, 2048, 1536), [32, 8], 128, 128, 64, 1024, 20.0, torch.bfloat16],
    [1, 64, (512, 1024), [16, 8], 32, 256, 128, None, None, torch.float16],
    [3, 256, (2048, 3072, 1536), [8, 4], 64, 64, 64, 256, 25.0, torch.bfloat16],
    [2, 512, (2048, 4096), [16, 8], 32, 128, 128, None, None, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 64, 256, 64, 128, 10.0, torch.bfloat16],
    [1, 1024, (4096,), [32, 8], 32, 64, 128, None, None, torch.float16],
    [3, 64, (512, 1024, 1536), [8, 4], 64, 128, 64, 64, 30.0, torch.bfloat16],
    [2, 256, (2048, 3072), [16, 8], 32, 256, 128, None, None, torch.float16],
    [4, 512, (2048, 4096, 3072, 1536), [32, 16], 32, 64, 64, 512, 5.0, torch.bfloat16],
    [1, 128, (1024, 2048), [16, 4], 64, 128, 128, None, None, torch.float16],
    [2, 1024, (4096, 3072), [32, 8], 32, 256, 64, 1024, 15.0, torch.bfloat16],
    [2, 64, (512, 1024), [8, 4], 32, 64, 64, None, None, torch.float16],
    [4, 256, (2048, 3072, 1536, 4096), [16, 8], 64, 128, 128, 256, 20.0, torch.bfloat16],
    [1, 512, (2048, 4096), [32, 16], 32, 256, 64, None, None, torch.float16],
    [3, 128, (1024, 2048, 1536), [16, 4], 128, 64, 64, 128, 25.0, torch.bfloat16],
    [2, 1024, (4096, 3072), [32, 8], 64, 128, 128, None, None, torch.float16],
    [4, 64, (512, 1024, 1536, 2560), [8, 4], 32, 256, 64, 64, 10.0, torch.bfloat16],
    [1, 256, (2048, 1536), [16, 8], 32, 64, 128, None, None, torch.float16],
    [2, 512, (2048, 4096), [32, 16], 64, 128, 64, 512, 30.0, torch.bfloat16],
    [2, 128, (1024, 2048), [8, 4], 32, 256, 128, None, None, torch.float16],
    [1, 1024, (4096,), [32, 8], 32, 64, 64, 1024, 15.0, torch.bfloat16],
    [2, 64, (512, 1024), [16, 8], 64, 128, 128, None, None, torch.float16],
    [3, 256, (2048, 3072, 1536), [8, 4], 32, 256, 64, 256, 5.0, torch.bfloat16],
    [2, 512, (2048, 4096), [16, 8], 32, 64, 128, None, None, torch.float16],
    [4, 128, (1024, 2048, 1536, 2560), [32, 16], 64, 128, 64, 128, 20.0, torch.bfloat16]
]

def torch_attention(
    q, k, v, kv_lens, block_table,
    scale, sliding_window, soft_cap
):
    """Pure-PyTorch reference for block-paged causal attention.

    Args:
        q: queries, shape (num_seqs, q_len, num_query_heads, head_size).
        k, v: paged KV cache, shape (num_blocks, block_size, num_kv_heads, head_size).
        kv_lens: per-sequence KV length (indexable by sequence).
        block_table: (num_seqs, max_blocks) physical block ids per sequence.
        scale: attention logit scale.
        sliding_window: window width, or None to disable windowing.
        soft_cap: tanh logit cap, or None / non-positive to disable.

    Returns:
        Tensor of shape (num_seqs * q_len, num_query_heads, head_size) in q's dtype.
    """
    out_dtype = q.dtype
    num_seqs, q_len, num_q_heads, head_size = q.shape
    _, block_size, num_kv_heads, _ = k.shape

    per_seq_out = []
    for seq_idx in range(num_seqs):
        kv_len = kv_lens[seq_idx]
        blocks_needed = (kv_len + block_size - 1) // block_size
        phys_blocks = block_table[seq_idx, :blocks_needed]

        # Gather the pages into contiguous [kv_len, num_kv_heads, head_size]
        # tensors, trimming the tail padding of the last block.
        keys = k[phys_blocks].view(-1, num_kv_heads, head_size)[:kv_len]
        vals = v[phys_blocks].view(-1, num_kv_heads, head_size)[:kv_len]
        queries = q[seq_idx].view(-1, num_q_heads, head_size)

        # GQA: replicate each KV head so head counts match the query side.
        if queries.shape[1] != keys.shape[1]:
            group = queries.shape[1] // keys.shape[1]
            keys = torch.repeat_interleave(keys, group, dim=1)
            vals = torch.repeat_interleave(vals, group, dim=1)

        # Logits in fp32 for numerical stability.
        scores = torch.einsum(
            "mhd,nhd->hmn", queries.to(torch.float32), keys.to(torch.float32)
        ).to(torch.float32) * scale

        # Causal mask: query m may attend kv positions <= kv_len - q_len + m.
        ones = torch.ones(q_len, kv_len)
        mask = torch.triu(ones, diagonal=kv_len - q_len + 1).bool()

        # Soft cap squashes logits before masking/softmax.
        if soft_cap is not None and soft_cap > 0:
            scores = soft_cap * torch.tanh(scores / soft_cap).to(torch.float32)

        # Sliding window additionally masks positions too far in the past.
        if sliding_window is not None:
            too_old = torch.triu(
                ones,
                diagonal=kv_len - (q_len + sliding_window) + 1
            ).bool().logical_not()
            mask |= too_old

        scores.masked_fill_(mask, float("-inf"))
        probs = torch.softmax(scores, dim=-1).to(torch.float32)
        per_seq_out.append(
            torch.einsum('hmn,nhd->mhd', probs, vals.to(torch.float32)).to(out_dtype)
        )

    return torch.cat(per_seq_out, dim=0)



@pytest.mark.parametrize("test_case", TEST_CASES)
@torch.inference_mode()
def test_unified_attn(
    test_case
):
    """End-to-end accuracy test: unified_attn NPU kernel vs torch_attention.

    ``test_case`` layout:
    [num_seqs, q_len, (kv_lens...), [num_query_heads, num_kv_heads],
     head_size, num_blocks, block_size, sliding_window, soft_cap, dtype];
    ``sliding_window`` / ``soft_cap`` may be None to disable that feature.
    """
    (num_seqs, q_len, kv_lens, num_heads, head_size, num_blocks, block_size,
     sliding_window, soft_cap, dtype) = test_case
    torch.set_default_device(DEVICE)
    # NOTE(review): hard-codes NPU card 1 — confirm this matches the CI host topology.
    torch.npu.set_device(1)
    # Seed every RNG source so tensor generation is reproducible across runs.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    num_query_heads = num_heads[0]
    num_kv_heads = num_heads[1]

    max_kv_len = max(kv_lens)

    # The kernel takes a (left, right) window; (-1, -1) means "no window".
    window_size = ((sliding_window - 1, 0) if sliding_window is not None else (-1, -1))

    scale = 1.0 / math.sqrt(float(head_size))

    sigtype_dict = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    sigtype = sigtype_dict[dtype]

    # Strictly positive values in [0.5, 3.0] keep the comparison away from
    # sign-cancellation noise in the low-precision dtypes.
    query = test_common.generate_tensor((num_seqs, q_len, num_query_heads, head_size), sigtype).uniform_(0.5, 3.0)
    key_cache = test_common.generate_tensor((num_blocks, block_size, num_kv_heads, head_size), sigtype).uniform_(0.5, 3.0)
    value_cache = test_common.generate_tensor((num_blocks, block_size, num_kv_heads, head_size), sigtype).uniform_(0.5, 3.0)

    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    block_table = torch.randint(0, num_blocks,
                                (num_seqs, max_num_blocks_per_seq),
                                dtype=torch.int32)
    kv_lens = torch.tensor(kv_lens, dtype=torch.int32)
    # The kernel encodes "soft cap disabled" as 0.0 rather than None.
    soft_cap = float(soft_cap) if soft_cap is not None else 0.0
    tri_out = unified_attn(
        query, key_cache, value_cache, kv_lens, block_table,
        scale, window_size, soft_cap,
    )

    ref_out = torch_attention(query, key_cache, value_cache, kv_lens, block_table, scale, sliding_window, soft_cap)
    # Reference output is flat over (seq, q_pos); flatten the kernel's to match.
    tri_out = tri_out.flatten(0, 1)
    passed = compare.check_operator_accuracy(ref_out, tri_out)
    assert passed["result_check"]