import torch
import torch_npu
import numpy as np

# RMSNorm epsilon shared by every test case below.
epsilon = 1e-5

# Column names for each row of TEST_CASES (19 columns per row, positional).
TEST_CASE_HEADER = ["data_type",                # activation dtype (torch.float16 / torch.bfloat16)
                    "wdqkv_type",               # wdqkv weight dtype (torch.int8 selects the quantized path)
                    "wuq_type",                 # wuq weight dtype (torch.int8 selects the quantized path)
                    "tokens",                   # number of input tokens
                    "head_num",                 # number of query heads
                    "block_size",               # KV-cache block size
                    "block_num",                # number of KV-cache blocks
                    "hidden_size",              # hidden size of the input activations
                    "hidden_size_wdqkv",        # output size of the wdqkv down-projection
                    "hidden_size_wuq_head",     # per-head output size of the wuq projection
                    "hidden_size_wuk_head",     # per-head output size of the wuk projection
                    "hidden_size_wdq",          # query slice of the wdqkv output
                    "hidden_size_rope_q_head",  # per-head rotary dimension of q
                    "hidden_size_rope_k",       # rotary dimension of k
                    "epsilon",                  # RMSNorm epsilon
                    "transpose_wdqkv",          # True if wdqkv is stored transposed
                    "transpose_wuq",            # True if wuq is stored transposed
                    "transpose_wuk",            # True if wuk is stored transposed
                    "cache_mode"]               # KV-cache layout selector

# Each row maps positionally onto TEST_CASE_HEADER.
TEST_CASES = [
    # float16 activations, int8-quantized weights.
    [torch.float16, torch.int8, torch.int8, 100, 128, 128, 192, 7168, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, False, 0],
    [torch.float16, torch.int8, torch.int8, 200, 128, 128, 192, 7168, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, False, 0],
    [torch.float16, torch.int8, torch.int8, 300, 64, 128, 192, 7168, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, True, 0],
    [torch.float16, torch.int8, torch.int8, 400, 64, 128, 192, 7168, 2112, 192, 512, 1536, 128, 128, epsilon, True, True, True, 1],
    [torch.float16, torch.int8, torch.int8, 512, 128, 128, 192, 2048, 2112, 192, 512, 1536, 128, 128, epsilon, True, True, True, 1],

    # bfloat16 activations, mixed int8 / bfloat16 weights.
    [torch.bfloat16, torch.int8, torch.int8, 600, 128, 256, 192, 2048, 2112, 192, 512, 1536, 128, 128, epsilon, True, True, False, 0],
    [torch.bfloat16, torch.bfloat16, torch.int8, 700, 128, 256, 192, 2048, 2112, 192, 512, 1536, 128, 128, epsilon, True, True, False, 0],
    [torch.bfloat16, torch.int8, torch.bfloat16, 800, 64, 256, 192, 8192, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, True, 0],
    [torch.bfloat16, torch.bfloat16, torch.bfloat16, 900, 64, 256, 192, 8192, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, True, 1],
    [torch.bfloat16, torch.bfloat16, torch.bfloat16, 1024, 128, 256, 192, 8192, 2112, 192, 512, 1536, 64, 64, epsilon, True, True, True, 1],
]

def gen_test_input(
    data_type: torch.dtype, 
    wdqkv_type: torch.dtype, 
    wuq_type: torch.dtype, 
    tokens: int,
    head_num_q: int,
    block_size: int,
    block_num: int,
    hidden_size: int,
    hidden_size_wdqkv: int,
    hidden_size_wuq_head: int,
    hidden_size_wuk_head: int,
    #
    hidden_size_wdq: int, 
    hidden_size_rope_q_head : int, 
    hidden_size_rope_k: int,
    #
    epsilon : float, 
    transpose_wdqkv : bool, 
    transpose_wuq : bool, 
    transpose_wuk : bool, 
    #
    cache_mode: int,     
): 
    """Generate random NPU-resident inputs for one MLA-preprocess test case.

    Each parameter corresponds positionally to a column of TEST_CASE_HEADER.
    Returns a list: 23 tensors already moved to the NPU (input activations,
    RMSNorm gammas/betas, quant scales/offsets, weights, dequant scales,
    biases, rope cos/sin tables, KV cache and slot mapping), followed by the
    8 scalar attributes (hidden_size_wdq, hidden_size_rope_q_head,
    hidden_size_rope_k, epsilon, the three transpose flags, cache_mode).

    NOTE: values are random and not seeded, so every call produces fresh
    data. Requires an NPU device (torch_npu) for the final `.npu()` moves.
    """

    # Derived split sizes of the wdqkv output:
    hidden_size_wdkv = hidden_size_wdqkv - hidden_size_wdq      # kv slice, e.g. 576
    hidden_size_rms_kv = hidden_size_wdkv - hidden_size_rope_k  # normalized kv part, e.g. 512
    hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head

    hidden_size_output_kv = hidden_size_rope_k + hidden_size_rms_kv  # e.g. 576
    hidden_size_output_q = hidden_size_rope_q_head + hidden_size_wuk_head  # e.g. 576

    # Generate simulated inputs (replace with real data in actual use).

    # `hidden_states` (renamed from `input` to avoid shadowing the builtin).
    hidden_states = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=(tokens, hidden_size))
    ).to(data_type)
    # First RMSNorm scale/shift.
    gamma1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size,))
    ).to(data_type)
    beta1 = torch.from_numpy(
        np.random.randint(0, 2, (hidden_size,)).astype(np.float32)
    ).to(data_type)
    # Per-tensor quantization parameters for the first matmul input.
    quant_scale1 = torch.from_numpy(
        np.random.uniform(10.0, 20.0, size=(1,))
    ).to(data_type)
    quant_offset1 = torch.from_numpy(
        np.random.uniform(0.0, 31.0, size=(1,))
    ).to(torch.int8)
    # wdqkv weight; storage layout depends on the transpose flag.
    shape_wdqkv = (hidden_size_wdqkv, hidden_size) if transpose_wdqkv else (hidden_size, hidden_size_wdqkv)
    wdqkv = torch.from_numpy(
        np.random.uniform(0.0, 2.0, size=shape_wdqkv)
    ).to(wdqkv_type)
    # Per-channel dequantization scale and int32 bias for matmul 1.
    de_scale1 = torch.from_numpy(
        np.random.uniform(0.0, 1e-6, size=(hidden_size_wdqkv,))
    ).to(torch.float32)
    bias1 = torch.from_numpy(
        np.random.randint(0, 10, (1, hidden_size_wdqkv)).astype(np.int32)
    ).to(torch.int32)

    # Second RMSNorm scale/shift (applied to the wdq slice).
    gamma2 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size_wdq,))
    ).to(data_type)
    beta2 = torch.from_numpy(
        np.random.randint(0, 2, (hidden_size_wdq,)).astype(np.float32)
    ).to(data_type)
    # Per-tensor quantization parameters for the second matmul input.
    quant_scale2 = torch.from_numpy(
        np.random.uniform(10.0, 20.0, size=(1,))
    ).to(data_type)
    quant_offset2 = torch.from_numpy(
        np.random.uniform(0.0, 31.0, size=(1,))
    ).to(torch.int8)
    # wuq weight; storage layout depends on the transpose flag.
    shape_wuq = (head_num_q * hidden_size_wuq_head, hidden_size_wdq) if transpose_wuq else (hidden_size_wdq, head_num_q * hidden_size_wuq_head)
    wuq = torch.from_numpy(
        np.random.uniform(0.0, 2.0, size=shape_wuq)
    ).to(wuq_type)
    # Per-channel dequantization scale and int32 bias for matmul 2.
    de_scale2 = torch.from_numpy(
        np.random.uniform(0.0, 1e-6, size=(head_num_q * hidden_size_wuq_head,))
    ).to(torch.float32)
    bias2 = torch.from_numpy(
        np.random.randint(0, 10, (1, head_num_q * hidden_size_wuq_head)).astype(np.int32)
    ).to(torch.int32)

    # Third RMSNorm scale (applied to the kv slice before caching).
    gamma3 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size_rms_kv,))
    ).to(data_type)
    # Rotary embedding tables; q and k deliberately share the same tables.
    cos1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(tokens, hidden_size_rope_k))
    ).to(data_type)
    sin1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(tokens, hidden_size_rope_k))
    ).to(data_type)
    cos2 = cos1
    sin2 = sin1
    # Per-head wuk weight; storage layout depends on the transpose flag.
    shape_wuk = (head_num_q, hidden_size_nope_q_head, hidden_size_wuk_head) if not transpose_wuk else (head_num_q, hidden_size_wuk_head, hidden_size_nope_q_head)
    wuk = torch.from_numpy(
        np.random.uniform(0.0, 2.0, size=shape_wuk)
    ).to(data_type)
    # Paged KV cache pre-filled with noise.
    kv_cache = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(block_num, block_size, 1, hidden_size_output_kv))
    ).to(data_type)
    # One distinct cache slot per token (replace=False guarantees no collisions).
    slot_mapping = torch.from_numpy(
        np.random.choice(block_num * block_size, tokens, replace=False).astype(np.int32)
    ).to(torch.int32)

    return [item.npu() for item in 
            [hidden_states, 
             gamma1, 
             beta1, 
             quant_scale1, 
             quant_offset1, 
             wdqkv, 
             de_scale1, 
             bias1, 
             #
             gamma2, 
             beta2, 
             quant_scale2, 
             quant_offset2, 
             wuq, 
             de_scale2, 
             bias2, 
             #
             gamma3, 
             cos1, 
             sin1, 
             cos2, 
             sin2, 
             wuk, 
             kv_cache, 
             slot_mapping, 
             #
             ]] + [
             hidden_size_wdq, 
             hidden_size_rope_q_head, 
             hidden_size_rope_k, 
             epsilon, 
             transpose_wdqkv, 
             transpose_wuq, 
             transpose_wuk, 
             cache_mode, ]