import random
import torch
import torch_npu
import numpy as np
import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), "../../../precision"))
import test_common

# Column names for each TEST_CASES row, in order (mirrors gen_input's parameters).
TEST_CASE_HEADER = ['q_len', 'kv_lens', 'num_heads', 'head_size', 'num_blocks', 'block_size', 'sliding_window', 'soft_cap', 'dtype']

TEST_CASES = [
    # Row format (matches TEST_CASE_HEADER):
    # [q_len, (kv_lens...), [num_query_heads, num_kv_heads], head_size, num_blocks, block_size, sliding_window, soft_cap, dtype]
    # sliding_window and soft_cap may be None (feature disabled); kv_lens holds one entry per sequence (batch size = len(kv_lens)).
    [64, [512,], [8, 4], 64, 64, 32, 128, None, torch.float16],
    [128, [1024,], [16, 4], 64, 64, 64, 128, 10.0, torch.bfloat16],
    [128, [512, 1024], [8, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [128, [512, 1024, 2048], [4, 4], 64, 64, 32, 128, None, torch.bfloat16],
    [1, [512, 2048], [12, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [64, [512, 1024, 2048], [8, 8], 64, 64, 32, 128, 20.0, torch.bfloat16],
    [4, [128, 1024], [16, 4], 64, 64, 32, 128, 20.0, torch.float16],
    [64, [512, 1024], [16, 16], 64, 64, 32, 128, 20.0, torch.bfloat16],
    [256, [2048,], [8, 4], 32, 64, 64, None, None, torch.float16],
    [512, [4096, 3072], [16, 8], 64, 128, 128, 512, 10.0, torch.bfloat16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 32, 256, 64, 128, None, torch.float16],
    [64, [512,], [4, 4], 128, 64, 64, None, 15.0, torch.bfloat16],
    [256, [2048, 3072, 4096], [16, 8], 32, 128, 128, 256, None, torch.float16],
    [1024, [4096, 3072], [32, 16], 64, 256, 64, 1024, 20.0, torch.bfloat16],
    [128, [1024, 2048, 1536, 2560], [16, 4], 32, 64, 128, None, None, torch.float16],
    [512, [2048,], [32, 8], 128, 128, 64, 512, 5.0, torch.bfloat16],
    [64, [512, 1024, 1536], [8, 4], 32, 256, 64, None, None, torch.float16],
    [256, [2048, 3072], [16, 8], 64, 64, 128, 256, 25.0, torch.bfloat16],
    [1024, [4096,], [32, 16], 32, 128, 64, None, None, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 8], 64, 256, 128, 128, 10.0, torch.bfloat16],
    [64, [512, 1024, 1536], [16, 4], 32, 64, 64, None, None, torch.float16],
    [256, [2048,], [8, 4], 128, 128, 64, 256, 30.0, torch.bfloat16],
    [512, [2048, 4096], [32, 16], 32, 256, 128, None, None, torch.float16],
    [1024, [4096, 3072, 2048, 1536], [16, 8], 64, 64, 64, 1024, 15.0, torch.bfloat16],
    [64, [512,], [8, 4], 32, 128, 128, None, None, torch.float16],
    [256, [2048, 1024, 3072], [32, 8], 64, 256, 64, 256, None, torch.bfloat16],
    [512, [3072, 4096], [16, 4], 32, 64, 64, None, 20.0, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 128, 128, 64, 128, None, torch.bfloat16],
    [256, [2048, 3072], [16, 8], 32, 256, 128, None, 25.0, torch.float16],
    [1024, [4096, 3072, 2048], [32, 16], 64, 64, 64, 1024, None, torch.bfloat16],
    [64, [512, 1024], [8, 4], 32, 128, 128, None, None, torch.float16],
    [256, [2048, 4096, 1536, 3072], [16, 8], 64, 256, 64, 256, 10.0, torch.bfloat16],
    [512, [3072, 4096], [32, 16], 32, 64, 64, None, None, torch.float16],
    [128, [1024, 2048, 1536], [16, 4], 128, 128, 64, 128, 30.0, torch.bfloat16],
    [256, [2048, 1024], [8, 4], 32, 256, 128, None, None, torch.float16],
    [1024, [4096, 3072, 2048, 1536], [32, 8], 64, 64, 64, 1024, 15.0, torch.bfloat16],
    [64, [512, 1024, 1536], [16, 8], 32, 128, 64, None, None, torch.float16],
    [512, [2048, 3072, 4096], [32, 16], 64, 256, 128, 512, 20.0, torch.bfloat16],
    [128, [1024, 2048], [8, 4], 32, 64, 64, None, None, torch.float16],
    [256, [2048, 3072, 1536, 4096], [16, 8], 128, 128, 64, 256, None, torch.bfloat16],
    [1024, [4096, 3072], [32, 16], 32, 256, 128, None, 25.0, torch.float16],
    [64, [512, 1024], [8, 4], 64, 64, 64, 64, None, torch.bfloat16],
    [512, [2048, 4096], [16, 8], 32, 128, 128, None, 10.0, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 64, 256, 64, 128, None, torch.bfloat16],
    [256, [2048, 1536], [16, 4], 32, 64, 128, None, 30.0, torch.float16],
    [1024, [4096, 3072], [32, 8], 64, 128, 64, 1024, None, torch.bfloat16],
    [64, [512, 1024, 1536], [8, 4], 32, 256, 64, None, 5.0, torch.float16],
    [256, [2048, 3072, 1536, 4096], [16, 8], 32, 64, 128, 256, None, torch.bfloat16],
    [512, [2048, 4096], [32, 16], 64, 128, 64, None, 15.0, torch.float16],
    [128, [1024, 2048, 1536], [16, 4], 32, 256, 128, 128, None, torch.bfloat16],
    [1024, [4096,], [32, 8], 32, 64, 64, None, 20.0, torch.float16],
    [64, [512, 1024, 1536, 2560], [8, 4], 64, 128, 128, 64, None, torch.bfloat16],
    [256, [2048, 3072], [16, 8], 32, 256, 64, None, None, torch.float16],
    [512, [2048, 4096, 3072], [32, 16], 128, 64, 64, 512, 25.0, torch.bfloat16],
    [128, [1024, 2048, 1536], [8, 4], 32, 128, 128, None, None, torch.float16],
    [1024, [4096, 3072, 2048, 1536], [32, 8], 64, 256, 64, 1024, 10.0, torch.bfloat16],
    [64, [512, 1024], [16, 8], 32, 64, 128, None, None, torch.float16],
    [256, [2048, 1536, 3072], [8, 4], 64, 128, 64, 256, 30.0, torch.bfloat16],
    [512, [2048, 4096], [16, 8], 32, 256, 128, None, None, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 32, 64, 64, 128, 15.0, torch.bfloat16],
    [1024, [4096, 3072], [32, 8], 64, 128, 128, None, None, torch.float16],
    [64, [512, 1024, 1536], [8, 4], 32, 256, 64, 64, None, torch.bfloat16],
    [256, [2048, 3072], [16, 8], 128, 64, 64, None, 20.0, torch.float16],
    [512, [2048, 4096, 3072, 1536], [32, 16], 32, 128, 128, 512, None, torch.bfloat16],
    [128, [1024, 2048], [16, 4], 32, 256, 128, None, 5.0, torch.float16],
    [1024, [4096,], [32, 8], 64, 64, 64, 1024, None, torch.bfloat16],
    [64, [512, 1024], [8, 4], 32, 128, 128, None, 25.0, torch.float16],
    [256, [2048, 3072, 1536, 4096], [16, 8], 64, 256, 64, 256, None, torch.bfloat16],
    [512, [2048, 4096], [32, 16], 32, 64, 128, None, 10.0, torch.float16],
    [128, [1024, 2048, 1536], [16, 4], 64, 128, 64, 128, None, torch.bfloat16],
    [1024, [4096, 3072], [32, 8], 32, 256, 64, None, 30.0, torch.float16],
    [64, [512, 1024, 1536, 2560], [8, 4], 32, 64, 128, 64, None, torch.bfloat16],
    [256, [2048, 1536], [16, 8], 64, 128, 128, None, 15.0, torch.float16],
    [512, [2048, 4096], [32, 16], 32, 256, 64, 512, None, torch.bfloat16],
    [128, [1024, 2048], [8, 4], 32, 64, 64, None, None, torch.float16],
    [1024, [4096, 3072, 2048, 1536], [32, 8], 128, 128, 64, 1024, 20.0, torch.bfloat16],
    [64, [512, 1024], [16, 8], 32, 256, 128, None, None, torch.float16],
    [256, [2048, 3072, 1536], [8, 4], 64, 64, 64, 256, 25.0, torch.bfloat16],
    [512, [2048, 4096], [16, 8], 32, 128, 128, None, None, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 64, 256, 64, 128, 10.0, torch.bfloat16],
    [1024, [4096,], [32, 8], 32, 64, 128, None, None, torch.float16],
    [64, [512, 1024, 1536], [8, 4], 64, 128, 64, 64, 30.0, torch.bfloat16],
    [256, [2048, 3072], [16, 8], 32, 256, 128, None, None, torch.float16],
    [512, [2048, 4096, 3072, 1536], [32, 16], 32, 64, 64, 512, 5.0, torch.bfloat16],
    [128, [1024, 2048], [16, 4], 64, 128, 128, None, None, torch.float16],
    [1024, [4096, 3072], [32, 8], 32, 256, 64, 1024, 15.0, torch.bfloat16],
    [64, [512, 1024], [8, 4], 32, 64, 64, None, None, torch.float16],
    [256, [2048, 3072, 1536, 4096], [16, 8], 64, 128, 128, 256, 20.0, torch.bfloat16],
    [512, [2048, 4096], [32, 16], 32, 256, 64, None, None, torch.float16],
    [128, [1024, 2048, 1536], [16, 4], 128, 64, 64, 128, 25.0, torch.bfloat16],
    [1024, [4096, 3072], [32, 8], 64, 128, 128, None, None, torch.float16],
    [64, [512, 1024, 1536, 2560], [8, 4], 32, 256, 64, 64, 10.0, torch.bfloat16],
    [256, [2048, 1536], [16, 8], 32, 64, 128, None, None, torch.float16],
    [512, [2048, 4096], [32, 16], 64, 128, 64, 512, 30.0, torch.bfloat16],
    [128, [1024, 2048], [8, 4], 32, 256, 128, None, None, torch.float16],
    [1024, [4096,], [32, 8], 32, 64, 64, 1024, 15.0, torch.bfloat16],
    [64, [512, 1024], [16, 8], 64, 128, 128, None, None, torch.float16],
    [256, [2048, 3072, 1536], [8, 4], 32, 256, 64, 256, 5.0, torch.bfloat16],
    [512, [2048, 4096], [16, 8], 32, 64, 128, None, None, torch.float16],
    [128, [1024, 2048, 1536, 2560], [32, 16], 64, 128, 64, 128, 20.0, torch.bfloat16]
]

def gen_input(
    q_len, kv_lens, num_heads, head_size, num_blocks,
    block_size, sliding_window, soft_cap, dtype
):
    """Build randomized paged-attention inputs and move them onto the NPU.

    Args:
        q_len: query sequence length per batch entry.
        kv_lens: per-sequence KV lengths; batch size is len(kv_lens).
        num_heads: pair-like [num_query_heads, num_kv_heads].
        head_size: per-head hidden dimension.
        num_blocks: total KV-cache blocks available.
        block_size: tokens held by one KV-cache block.
        sliding_window: window size, or None (passed through unchanged).
        soft_cap: logit soft cap, or None (normalized to 0.0).
        dtype: torch.float16 or torch.bfloat16.

    Returns:
        [q, k, v, block_table, kv_lens_tensor] (all on NPU) + [sliding_window, soft_cap].
    """
    batch = len(kv_lens)
    # Indexing (not unpacking) so any extra trailing entries are ignored, as before.
    n_q_heads = num_heads[0]
    n_kv_heads = num_heads[1]
    longest_kv = max(kv_lens)

    # Map the torch dtype to the string tag test_common.generate_tensor expects.
    sigtype = {torch.float16: "float16", torch.bfloat16: "bfloat16"}[dtype]

    # Random-call order (q, k, v, block_table) is kept identical to preserve RNG streams.
    query = test_common.generate_tensor(
        (batch, q_len, n_q_heads, head_size), sigtype).uniform_(0.5, 3.0)
    key_cache = test_common.generate_tensor(
        (num_blocks, block_size, n_kv_heads, head_size), sigtype).uniform_(0.5, 3.0)
    value_cache = test_common.generate_tensor(
        (num_blocks, block_size, n_kv_heads, head_size), sigtype).uniform_(0.5, 3.0)

    # Ceil-divide: enough block slots per sequence to cover the longest KV length.
    slots_per_seq = -(-longest_kv // block_size)
    block_table = torch.randint(
        0, num_blocks, (batch, slots_per_seq), dtype=torch.int32)

    kv_len_tensor = torch.tensor(kv_lens, dtype=torch.int32)
    cap = 0.0 if soft_cap is None else float(soft_cap)

    on_npu = [t.npu() for t in (query, key_cache, value_cache, block_table, kv_len_tensor)]
    return on_npu + [sliding_window, cap]