import random
import torch
import torch_npu
import numpy as np
import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../precision"))
import test_common

# Column layout for every row of TEST_CASES below.
#   num_heads    : (num_query_heads, num_kv_heads) pair; query heads must be a
#                  multiple of kv heads (enforced in gen_input)
#   q_len        : query length; None means "use k_len" (see gen_input)
#   dtype        : torch dtype used to generate q/k/v tensors
TEST_CASE_HEADER = ['batch_size', 'num_heads', 'k_len', 'q_len', 'head_size', 'homo_head', 'block_size', 'local_blocks', 'vert_strides', 'dtype']

TEST_CASES = [
    # Basic small-scale configurations (512 context)
    [1, (4, 1), 512, 1, 16, True, 16, 1, 1, torch.float16],
    [2, (4, 2), 512, None, 16, False, 16, 1, 2, torch.bfloat16],
    [3, (4, 4), 512, 1, 32, True, 32, 2, 1, torch.float16],
    [4, (8, 1), 512, None, 32, False, 32, 2, 2, torch.bfloat16],
    [1, (8, 2), 512, 1, 64, True, 64, 3, 3, torch.float16],
    [2, (8, 4), 512, None, 64, False, 16, 3, 4, torch.bfloat16],
    [3, (8, 8), 512, 1, 128, True, 16, 4, 1, torch.float16],
    [4, (16, 1), 512, None, 128, False, 32, 4, 2, torch.bfloat16],
    [1, (16, 2), 512, 1, 16, True, 32, 1, 3, torch.float16],
    [2, (16, 4), 512, None, 32, False, 64, 1, 4, torch.bfloat16],
    [3, (16, 8), 512, 1, 64, True, 64, 2, 1, torch.float16],
    [4, (16, 16), 512, None, 128, False, 16, 2, 2, torch.bfloat16],
    [1, (32, 1), 512, 1, 16, True, 16, 3, 3, torch.float16],
    [2, (32, 2), 512, None, 32, False, 32, 3, 4, torch.bfloat16],
    [3, (32, 4), 512, 1, 64, True, 32, 4, 1, torch.float16],
    [4, (16, 8), 512, None, 64, True, 64, 4, 2, torch.bfloat16],
    [1, (32, 16), 512, 1, 16, True, 64, 1, 3, torch.float16],
    [2, (32, 32), 512, None, 32, False, 16, 1, 4, torch.bfloat16],
    [3, (4, 1), 512, 1, 64, True, 32, 2, 1, torch.float16],
    [4, (4, 2), 512, None, 128, False, 32, 2, 2, torch.bfloat16],

    # Medium-scale configurations (1024 context)
    [1, (8, 1), 1024, None, 16, True, 16, 3, 3, torch.float16],
    [2, (8, 2), 1024, 1, 32, False, 16, 3, 4, torch.bfloat16],
    [3, (8, 4), 1024, None, 64, True, 64, 4, 1, torch.float16],
    [4, (8, 8), 1024, 1, 128, False, 64, 4, 2, torch.bfloat16],
    [1, (16, 1), 1024, None, 16, True, 16, 1, 3, torch.float16],
    [2, (16, 2), 1024, 1, 32, False, 32, 1, 4, torch.bfloat16],
    [3, (16, 4), 1024, None, 64, True, 32, 2, 1, torch.float16],
    [4, (16, 8), 1024, 1, 128, False, 64, 2, 2, torch.bfloat16],
    [1, (16, 8), 1024, None, 16, True, 64, 3, 3, torch.float16],
    [2, (32, 1), 1024, 1, 32, False, 16, 3, 4, torch.bfloat16],
    [3, (32, 2), 1024, None, 64, True, 16, 4, 1, torch.float16],
    [4, (32, 4), 1024, 1, 128, False, 32, 4, 2, torch.bfloat16],
    [1, (16, 8), 1024, None, 16, True, 32, 1, 3, torch.float16],
    [2, (32, 16), 1024, 1, 32, False, 64, 1, 4, torch.bfloat16],
    [3, (32, 32), 1024, None, 64, True, 64, 2, 1, torch.float16],
    [4, (4, 1), 1024, 1, 128, False, 16, 2, 2, torch.bfloat16],
    [1, (4, 2), 1024, None, 16, True, 16, 3, 3, torch.float16],
    [2, (4, 4), 1024, 1, 32, False, 32, 3, 4, torch.bfloat16],
    [3, (8, 1), 1024, None, 64, True, 32, 4, 1, torch.float16],
    [4, (8, 2), 1024, 1, 128, False, 64, 4, 2, torch.bfloat16],

    # Large-scale configurations (2048 context)
    [1, (16, 1), 2048, 1, 16, True, 16, 1, 1, torch.float16],
    [2, (16, 2), 2048, None, 32, False, 16, 1, 2, torch.bfloat16],
    [3, (16, 4), 2048, 1, 64, True, 32, 2, 1, torch.float16],
    [4, (16, 8), 2048, None, 128, False, 32, 2, 2, torch.bfloat16],
    [1, (16, 16), 2048, 1, 16, True, 64, 3, 3, torch.float16],
    [2, (32, 1), 2048, None, 32, False, 64, 3, 4, torch.bfloat16],
    [3, (32, 2), 2048, 1, 64, True, 16, 4, 1, torch.float16],
    [4, (32, 4), 2048, None, 128, False, 16, 4, 2, torch.bfloat16],
    [1, (32, 8), 2048, 1, 16, True, 32, 1, 3, torch.float16],
    [2, (32, 16), 2048, None, 32, False, 32, 1, 4, torch.bfloat16],
    [3, (32, 32), 2048, 1, 64, True, 64, 2, 1, torch.float16],
    [4, (4, 1), 2048, None, 64, True, 64, 2, 2, torch.bfloat16],
    [1, (4, 2), 2048, 1, 16, True, 16, 3, 3, torch.float16],
    [2, (4, 4), 2048, None, 32, False, 16, 3, 4, torch.bfloat16],
    [3, (8, 1), 2048, 1, 64, True, 32, 4, 1, torch.float16],
    [4, (8, 2), 2048, None, 128, False, 32, 4, 2, torch.bfloat16],
    [1, (8, 4), 2048, 1, 16, True, 64, 1, 3, torch.float16],
    [2, (8, 8), 2048, None, 32, False, 64, 1, 4, torch.bfloat16],
    [3, (16, 1), 2048, 1, 64, True, 16, 2, 1, torch.float16],
    [4, (16, 2), 2048, None, 128, False, 16, 2, 2, torch.bfloat16],

    # Extra-large context configurations (4096 context)
    [1, (4, 4), 4096, None, 16, True, 32, 3, 3, torch.float16],
    [2, (16, 8), 4096, 1, 32, False, 32, 3, 4, torch.bfloat16],
    [3, (16, 16), 4096, None, 64, True, 64, 4, 1, torch.float16],
    [4, (32, 1), 4096, 1, 128, False, 64, 4, 2, torch.bfloat16],
    [1, (16, 8), 4096, None, 16, True, 16, 1, 3, torch.float16],
    [2, (32, 4), 4096, 1, 32, False, 16, 1, 4, torch.bfloat16],
    [3, (32, 16), 4096, None, 64, True, 32, 2, 1, torch.float16],
    [4, (32, 16), 4096, 1, 128, False, 32, 2, 2, torch.bfloat16],
    [1, (32, 32), 4096, None, 16, True, 64, 3, 3, torch.float16],
    [2, (4, 1), 4096, 1, 32, False, 64, 3, 4, torch.bfloat16],
    [3, (4, 2), 4096, None, 64, True, 16, 4, 1, torch.float16],
    [4, (4, 4), 4096, 1, 128, False, 16, 4, 2, torch.bfloat16],
    [1, (8, 1), 4096, None, 16, True, 32, 1, 3, torch.float16],
    [2, (8, 2), 4096, 1, 32, False, 32, 1, 4, torch.bfloat16],
    [3, (8, 4), 4096, None, 64, True, 64, 2, 1, torch.float16],
    [4, (8, 8), 4096, 1, 128, False, 64, 2, 2, torch.bfloat16],
    [1, (16, 16), 4096, None, 16, True, 16, 3, 3, torch.float16],
    [2, (16, 2), 4096, 1, 32, False, 16, 3, 4, torch.bfloat16],
    [3, (16, 16), 4096, None, 64, True, 32, 4, 1, torch.float16],
    [4, (16, 8), 4096, 1, 128, False, 32, 4, 2, torch.bfloat16],

    # Mixed configurations spanning all context lengths
    [1, (32, 1), 2048, 1, 64, True, 64, 2, 3, torch.float16],
    [2, (32, 2), 1024, None, 128, False, 16, 3, 1, torch.bfloat16],
    [3, (16, 8), 512, 1, 32, True, 32, 4, 2, torch.float16],
    [4, (8, 4), 4096, None, 16, False, 64, 1, 4, torch.bfloat16],
    [1, (4, 1), 2048, 1, 128, True, 16, 2, 1, torch.float16],
    [2, (8, 2), 1024, None, 64, False, 32, 3, 2, torch.bfloat16],
    [3, (16, 4), 512, 1, 16, True, 64, 4, 3, torch.float16],
    [4, (32, 16), 4096, None, 32, False, 16, 1, 4, torch.bfloat16],
    [1, (32, 32), 2048, 1, 64, True, 32, 2, 1, torch.float16],
    [2, (4, 4), 1024, None, 64, True, 64, 3, 2, torch.bfloat16],
    [3, (8, 8), 512, 1, 16, True, 16, 4, 3, torch.float16],
    [4, (16, 16), 4096, None, 32, False, 32, 1, 4, torch.bfloat16],
    [1, (4, 2), 2048, 1, 64, True, 64, 2, 1, torch.float16],
    [2, (8, 1), 1024, None, 128, False, 16, 3, 2, torch.bfloat16],
    [3, (16, 2), 512, 1, 16, True, 32, 4, 3, torch.float16],
    [4, (32, 4), 4096, None, 32, False, 64, 1, 4, torch.bfloat16],
    [1, (32, 8), 2048, 1, 64, True, 16, 2, 1, torch.float16],
    [2, (4, 1), 1024, None, 128, False, 32, 3, 2, torch.bfloat16],
    [3, (8, 4), 512, 1, 16, True, 64, 4, 3, torch.float16],
    [4, (16, 1), 4096, None, 32, False, 16, 1, 4, torch.bfloat16]
]

def gen_input(
    batch_size, num_heads, k_len, q_len, head_size, homo_head,
    block_size, local_blocks, vert_strides, dtype
):
    """Materialize one test case's inputs on the NPU.

    Generates random q/k/v tensors via test_common.generate_tensor (values
    drawn uniformly from [0.5, 3.0)), moves them to the NPU, and passes the
    remaining sparse-attention parameters through unchanged.

    Args:
        batch_size: number of sequences in the batch.
        num_heads: (num_query_heads, num_kv_heads) pair; query heads must
            divide evenly by kv heads (grouped-query layout).
        k_len: key/value sequence length.
        q_len: query sequence length, or None to reuse k_len.
        head_size: per-head hidden dimension.
        homo_head, block_size, local_blocks, vert_strides: sparsity
            parameters forwarded as-is to the caller.
        dtype: torch.float16 or torch.bfloat16.

    Returns:
        [q, k, v] NPU tensors followed by
        [homo_head, block_size, local_blocks, vert_strides].
    """
    if q_len is None:
        q_len = k_len

    # generate_tensor takes the dtype by name rather than as a torch object.
    dtype_names = {
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
    }
    dtype_name = dtype_names[dtype]

    num_query_heads, num_kv_heads = num_heads[0], num_heads[1]
    assert num_query_heads % num_kv_heads == 0

    # q uses the query head count/length; k and v share the kv shape.
    q_shape = (batch_size, num_query_heads, q_len, head_size)
    kv_shape = (batch_size, num_kv_heads, k_len, head_size)
    qkv = [
        test_common.generate_tensor(shape, dtype_name).uniform_(0.5, 3.0)
        for shape in (q_shape, kv_shape, kv_shape)
    ]

    return [t.npu() for t in qkv] + [homo_head, block_size, local_blocks, vert_strides]