from typing import TypeVar
from functools import lru_cache
import math
import pytest
import torch
import numpy as np
import random
import triton
import triton.language as tl
import test_common
import os
import compare
import precision_calcu
import block_sparse_attn

DEVICE = "npu"  # default device string passed to torch.set_default_device (Ascend NPU)
SEED = 42  # fixed RNG seed so tensor generation is reproducible across runs

'''
Z_OPTIONS = [1, 2, 3, 4]
# head configurations ensuring num_query_heads % num_kv_heads == 0
H_OPTIONS = [
    (4, 1), (4, 2), (4, 4),
    (8, 1), (8, 2), (8, 4), (8, 8),
    (16, 1), (16, 2), (16, 4), (16, 8), (16, 16),
    (32, 1), (32, 2), (32, 4), (32, 8), (32, 16), (32, 32)
]
N_CTX_OPTIONS = [512, 1024, 2048, 4096]
D_HEAD_OPTIONS = [16, 32, 64, 128]
HOMO_HEAD_OPTIONS = [True, False]
BLOCK_SIZE_OPTIONS = [16, 32, 64]
LOCAL_BLOCKS_OPTIONS = [1, 2, 3, 4]
VERT_STRIDE_OPTIONS = [1, 2, 3, 4]
DTYPE_OPTIONS = [torch.float16, torch.bfloat16]
'''

# Each case: [Z, H=(num_query_heads, num_kv_heads), N_CTX, Q_LEN (None means Q_LEN == N_CTX),
#             D_HEAD, HOMO_HEAD, BLOCK_SIZE, LOCAL_BLOCKS, VERT_STRIDE, DTYPE]
# Field order matches the unpacking at the top of test_block_sparse_attn.
TEST_CASES = [
    # Basic small-scale configurations (512 context)
    [1, (4, 1), 512, 1, 512, True, 256, 1, 1, torch.float16],
    # [2, (4, 2), 512, None, 16, False, 16, 1, 2, torch.bfloat16],
    # [3, (4, 4), 512, 1, 32, True, 32, 2, 1, torch.float16],
    # [4, (8, 1), 512, None, 32, False, 32, 2, 2, torch.bfloat16],
    # [1, (8, 2), 512, 1, 64, True, 64, 3, 3, torch.float16],
    # [2, (8, 4), 512, None, 64, False, 16, 3, 4, torch.bfloat16],
    # [3, (8, 8), 512, 1, 128, True, 16, 4, 1, torch.float16],
    # [4, (16, 1), 512, None, 128, False, 32, 4, 2, torch.bfloat16],
    # [1, (16, 2), 512, 1, 16, True, 32, 1, 3, torch.float16],
    # [2, (16, 4), 512, None, 32, False, 64, 1, 4, torch.bfloat16],
    # [3, (16, 8), 512, 1, 64, True, 64, 2, 1, torch.float16],
    # [4, (16, 16), 512, None, 128, False, 16, 2, 2, torch.bfloat16],
    # [1, (32, 1), 512, 1, 16, True, 16, 3, 3, torch.float16],
    # [2, (32, 2), 512, None, 32, False, 32, 3, 4, torch.bfloat16],
    # [3, (32, 4), 512, 1, 64, True, 32, 4, 1, torch.float16],
    # [4, (16, 8), 512, None, 64, True, 64, 4, 2, torch.bfloat16],
    # [1, (32, 16), 512, 1, 16, True, 64, 1, 3, torch.float16],
    # [2, (32, 32), 512, None, 32, False, 16, 1, 4, torch.bfloat16],
    # [3, (4, 1), 512, 1, 64, True, 32, 2, 1, torch.float16],
    # [4, (4, 2), 512, None, 128, False, 32, 2, 2, torch.bfloat16],

    # # Medium-scale configurations (1024 context)
    # [1, (8, 1), 1024, None, 16, True, 16, 3, 3, torch.float16],
    # [2, (8, 2), 1024, 1, 32, False, 16, 3, 4, torch.bfloat16],
    # [3, (8, 4), 1024, None, 64, True, 64, 4, 1, torch.float16],
    # [4, (8, 8), 1024, 1, 128, False, 64, 4, 2, torch.bfloat16],
    # [1, (16, 1), 1024, None, 16, True, 16, 1, 3, torch.float16],
    # [2, (16, 2), 1024, 1, 32, False, 32, 1, 4, torch.bfloat16],
    # [3, (16, 4), 1024, None, 64, True, 32, 2, 1, torch.float16],
    # [4, (16, 8), 1024, 1, 128, False, 64, 2, 2, torch.bfloat16],
    # # ==== TEST_CASES 28
    # [1, (16, 8), 1024, None, 16, True, 64, 3, 3, torch.float16],
    # [2, (32, 1), 1024, 1, 32, False, 16, 3, 4, torch.bfloat16],
    # [3, (32, 2), 1024, None, 64, True, 16, 4, 1, torch.float16],
    # [4, (32, 4), 1024, 1, 128, False, 32, 4, 2, torch.bfloat16],
    # # ==== TEST_CASES 32
    # [1, (16, 8), 1024, None, 16, True, 32, 1, 3, torch.float16],
    # [2, (32, 16), 1024, 1, 32, False, 64, 1, 4, torch.bfloat16],
    # [3, (32, 32), 1024, None, 64, True, 64, 2, 1, torch.float16],
    # [4, (4, 1), 1024, 1, 128, False, 16, 2, 2, torch.bfloat16],
    # [1, (4, 2), 1024, None, 16, True, 16, 3, 3, torch.float16],
    # [2, (4, 4), 1024, 1, 32, False, 32, 3, 4, torch.bfloat16],
    # [3, (8, 1), 1024, None, 64, True, 32, 4, 1, torch.float16],
    # [4, (8, 2), 1024, 1, 128, False, 64, 4, 2, torch.bfloat16],

    # # Large-scale configurations (2048 context)
    # [1, (16, 1), 2048, 1, 16, True, 16, 1, 1, torch.float16],
    # [2, (16, 2), 2048, None, 32, False, 16, 1, 2, torch.bfloat16],
    # [3, (16, 4), 2048, 1, 64, True, 32, 2, 1, torch.float16],
    # [4, (16, 8), 2048, None, 128, False, 32, 2, 2, torch.bfloat16],
    # [1, (16, 16), 2048, 1, 16, True, 64, 3, 3, torch.float16],
    # [2, (32, 1), 2048, None, 32, False, 64, 3, 4, torch.bfloat16],
    # [3, (32, 2), 2048, 1, 64, True, 16, 4, 1, torch.float16],
    # [4, (32, 4), 2048, None, 128, False, 16, 4, 2, torch.bfloat16],
    # [1, (32, 8), 2048, 1, 16, True, 32, 1, 3, torch.float16],
    # [2, (32, 16), 2048, None, 32, False, 32, 1, 4, torch.bfloat16],
    # [3, (32, 32), 2048, 1, 64, True, 64, 2, 1, torch.float16],
    # # === TEST CASES 51
    # [4, (4, 1), 2048, None, 64, True, 64, 2, 2, torch.bfloat16],
    # [1, (4, 2), 2048, 1, 16, True, 16, 3, 3, torch.float16],
    # [2, (4, 4), 2048, None, 32, False, 16, 3, 4, torch.bfloat16],
    # [3, (8, 1), 2048, 1, 64, True, 32, 4, 1, torch.float16],
    # [4, (8, 2), 2048, None, 128, False, 32, 4, 2, torch.bfloat16],
    # [1, (8, 4), 2048, 1, 16, True, 64, 1, 3, torch.float16],
    # [2, (8, 8), 2048, None, 32, False, 64, 1, 4, torch.bfloat16],
    # [3, (16, 1), 2048, 1, 64, True, 16, 2, 1, torch.float16],
    # [4, (16, 2), 2048, None, 128, False, 16, 2, 2, torch.bfloat16],

    # # Extra-large context configurations (4096 context)
    # # TEST CASES 60
    # [1, (4, 4), 4096, None, 16, True, 32, 3, 3, torch.float16],
    # [2, (16, 8), 4096, 1, 32, False, 32, 3, 4, torch.bfloat16],
    # [3, (16, 16), 4096, None, 64, True, 64, 4, 1, torch.float16],
    # [4, (32, 1), 4096, 1, 128, False, 64, 4, 2, torch.bfloat16],
    # # ==== TEST CASES 64
    # [1, (16, 8), 4096, None, 16, True, 16, 1, 3, torch.float16],
    # [2, (32, 4), 4096, 1, 32, False, 16, 1, 4, torch.bfloat16],
    # # ==== TEST CASES 66
    # [3, (32, 16), 4096, None, 64, True, 32, 2, 1, torch.float16],
    # [4, (32, 16), 4096, 1, 128, False, 32, 2, 2, torch.bfloat16],
    # [1, (32, 32), 4096, None, 16, True, 64, 3, 3, torch.float16],
    # [2, (4, 1), 4096, 1, 32, False, 64, 3, 4, torch.bfloat16],
    # [3, (4, 2), 4096, None, 64, True, 16, 4, 1, torch.float16],
    # [4, (4, 4), 4096, 1, 128, False, 16, 4, 2, torch.bfloat16],
    # [1, (8, 1), 4096, None, 16, True, 32, 1, 3, torch.float16],
    # [2, (8, 2), 4096, 1, 32, False, 32, 1, 4, torch.bfloat16],
    # [3, (8, 4), 4096, None, 64, True, 64, 2, 1, torch.float16],
    # [4, (8, 8), 4096, 1, 128, False, 64, 2, 2, torch.bfloat16],
    # # ==== TEST CASES 76
    # [1, (16, 16), 4096, None, 16, True, 16, 3, 3, torch.float16],
    # [2, (16, 2), 4096, 1, 32, False, 16, 3, 4, torch.bfloat16],
    # [3, (16, 16), 4096, None, 64, True, 32, 4, 1, torch.float16],
    # [4, (16, 8), 4096, 1, 128, False, 32, 4, 2, torch.bfloat16],

    # # Mixed extended configurations
    # [1, (32, 1), 2048, 1, 64, True, 64, 2, 3, torch.float16],
    # [2, (32, 2), 1024, None, 128, False, 16, 3, 1, torch.bfloat16],
    # [3, (16, 8), 512, 1, 32, True, 32, 4, 2, torch.float16],
    # [4, (8, 4), 4096, None, 16, False, 64, 1, 4, torch.bfloat16],
    # [1, (4, 1), 2048, 1, 128, True, 16, 2, 1, torch.float16],
    # [2, (8, 2), 1024, None, 64, False, 32, 3, 2, torch.bfloat16],
    # [3, (16, 4), 512, 1, 16, True, 64, 4, 3, torch.float16],
    # [4, (32, 16), 4096, None, 32, False, 16, 1, 4, torch.bfloat16],
    # [1, (32, 32), 2048, 1, 64, True, 32, 2, 1, torch.float16],
    # # ==== TEST CASES 64
    # [2, (4, 4), 1024, None, 64, True, 64, 3, 2, torch.bfloat16],
    # [3, (8, 8), 512, 1, 16, True, 16, 4, 3, torch.float16],
    # [4, (16, 16), 4096, None, 32, False, 32, 1, 4, torch.bfloat16],
    # [1, (4, 2), 2048, 1, 64, True, 64, 2, 1, torch.float16],
    # [2, (8, 1), 1024, None, 128, False, 16, 3, 2, torch.bfloat16],
    # [3, (16, 2), 512, 1, 16, True, 32, 4, 3, torch.float16],
    # [4, (32, 4), 4096, None, 32, False, 64, 1, 4, torch.bfloat16],
    # [1, (32, 8), 2048, 1, 64, True, 16, 2, 1, torch.float16],
    # [2, (4, 1), 1024, None, 128, False, 32, 3, 2, torch.bfloat16],
    # [3, (8, 4), 512, 1, 16, True, 64, 4, 3, torch.float16],
    # [4, (16, 1), 4096, None, 32, False, 16, 1, 4, torch.bfloat16]
]

def torch_attention(q, k, v, attn_mask=None, sm_scale=None, block_attn_mask=None, block_size=128, do=None):
    """Dense float32 reference implementation of (optionally masked) attention.

    Args:
        q, k, v: (batch, heads, seq, head_dim) tensors. All math is done in
            float32 and the result is cast back to q.dtype.
        attn_mask: optional dense keep-mask (1 = attend, 0 = masked) applied
            to the full score matrix. Mutually exclusive with block_attn_mask.
        sm_scale: softmax scale; defaults to 1/sqrt(head_dim).
        block_attn_mask: optional (..., q_blocks, kv_blocks) block-level
            keep-mask. Scores are computed one block-row of queries at a
            time against all keys up to the end of that row's diagonal
            block, with an element-wise causal mask inside the diagonal block.
        block_size: side length of one attention block (block path only;
            the block path assumes seq is a multiple of block_size).
        do: optional upstream gradient; when given, dv is computed and
            printed for debugging (the returned output is unchanged).

    Returns:
        Attention output with the same shape and dtype as q.
    """
    dtype = q.dtype
    if sm_scale is None:
        sm_scale = 1.0 / math.sqrt(float(q.size(-1)))

    if block_attn_mask is not None:
        assert attn_mask is None
        outs = []
        for s in range(0, q.size(2), block_size):
            e = min(s + block_size, q.size(2))
            q_block = q[:, :, s:e]
            attn = torch.einsum('bhmd,bhnd->bhmn', q_block.to(torch.float32), k[:, :, :e].to(torch.float32)).to(torch.float32) * sm_scale
            # Expand the block-level keep-mask for this block-row to element
            # granularity via a Kronecker product with an all-ones tile.
            mask = block_attn_mask[..., s // block_size, : (s // block_size + 1)]
            mask = torch.kron(mask, torch.ones(block_size, block_size, device=mask.device))
            # Causal mask inside the diagonal block: query i may see key j only
            # when j <= i, so zero out strictly-upper entries (row < col).
            # BUGFIX: the original used `<=`, which also masked the diagonal —
            # a query could not attend to itself, and the first query row of
            # the first block became all -inf (NaN after softmax).
            mask[..., :, s:].masked_fill_(torch.arange(0, block_size)[:, None] < torch.arange(0, block_size)[None, :], 0)
            attn = attn.masked_fill((1 - mask).bool(), float('-inf'))
            attn = attn.softmax(-1)
            out = torch.einsum('bhmn,bhnd->bhmd', attn, v[:, :, :e].to(torch.float32))
            outs.append(out.to(dtype))
        torch_output = torch.cat(outs, dim=2)
    else:
        attn = torch.einsum('bhmd,bhnd->bhmn', q.to(torch.float32), k.to(torch.float32)).to(torch.float32) * sm_scale
        if attn_mask is not None:
            attn = attn.masked_fill((1 - attn_mask).bool(), float('-inf'))

        attn = attn.softmax(-1)
        if do is not None:
            # dv = P^T @ dO, printed for manual backward-pass inspection.
            dv = torch.einsum('bhqk,bhqd->bhkd', attn, do.to(torch.float32))
            print(f'> torch_attn computed dv: {dv=}')
        torch_output = torch.einsum('bhmn,bhnd->bhmd', attn, v.to(torch.float32)).to(dtype)
    return torch_output


@pytest.mark.parametrize('test_case', TEST_CASES)
@torch.inference_mode()
def test_block_sparse_attn(
    test_case,
):
    """Check the Triton block-sparse attention kernel against a dense
    PyTorch reference for one TEST_CASES configuration.

    The kernel derives its sparsity pattern from (BLOCK_SIZE, LOCAL_BLOCKS,
    VERT_STRIDE, HOMO_HEAD); the reference applies the equivalent dense mask
    returned by get_sparse_attn_mask.
    """
    (Z, H, N_CTX, Q_LEN, D_HEAD, HOMO_HEAD, BLOCK_SIZE, LOCAL_BLOCKS, VERT_STRIDE, DTYPE) = test_case
    Q_LEN = N_CTX if Q_LEN is None else Q_LEN  # query length may differ from KV context length
    torch.set_default_device(DEVICE)
    # NPU card index is configurable via env var; default keeps the
    # previously hard-coded card 3.
    torch.npu.set_device(int(os.getenv("NPU_DEVICE_ID", "3")))
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    scale = 1.0 / math.sqrt(float(D_HEAD))

    # Map torch dtype -> signature string understood by test_common.generate_tensor.
    dtype_to_sigtype = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    sigtype = dtype_to_sigtype[DTYPE]

    num_query_heads, num_kv_heads = H

    # GQA requires the query-head count to be a multiple of the KV-head count.
    assert num_query_heads % num_kv_heads == 0

    q = test_common.generate_tensor((Z, num_query_heads, Q_LEN, D_HEAD), sigtype).uniform_(0.5, 3.0)
    k = test_common.generate_tensor((Z, num_kv_heads, N_CTX, D_HEAD), sigtype).uniform_(0.5, 3.0)
    v = test_common.generate_tensor((Z, num_kv_heads, N_CTX, D_HEAD), sigtype).uniform_(0.5, 3.0)

    # Only the dense mask is needed here (for the torch reference); the CSR
    # form returned first is unused.
    _, _, mask_dense = block_sparse_attn.get_sparse_attn_mask(
        q, N_CTX, block_size=BLOCK_SIZE,
        local_blocks=LOCAL_BLOCKS,
        vert_stride=VERT_STRIDE,
        homo_head=HOMO_HEAD,
        return_dense=True
    )

    q_k_ratio = num_query_heads // num_kv_heads

    tri_out = block_sparse_attn.block_sparse_attn(
        q, k, v, N_CTX=N_CTX,
        BLOCK_SIZE=BLOCK_SIZE,
        LOCAL_BLOCKS=LOCAL_BLOCKS,
        VERT_STRIDE=VERT_STRIDE,
        HOMO_HEAD=HOMO_HEAD,
        sm_scale=scale
    )

    # Expand shared KV heads so the dense reference sees one KV head per
    # query head (grouped-query attention).
    if q_k_ratio > 1:
        k = torch.repeat_interleave(k, q_k_ratio, dim=1)
        v = torch.repeat_interleave(v, q_k_ratio, dim=1)

    ref_out = torch_attention(q, k, v, mask_dense, scale)

    passed = compare.check_operator_accuracy(ref_out, tri_out)
    assert passed["result_check"].item()