import torch
import torch_npu
import numpy as np



def gen_test_input(
    data_type: torch.dtype,
    tokens: int,
    head_num_q: int,
    block_size: int,
    block_num: int,
    hidden_size: int,
    hidden_size_wdqkv: int,
    hidden_size_wuq_head: int,
    hidden_size_wuk_head: int,
    #
    hidden_size_wdq: int,
    hidden_size_rope_q_head: int,
    hidden_size_rope_k: int,
    #
    epsilon: float,
    transpose_wdqkv: bool,
    transpose_wuq: bool,
    transpose_wuk: bool,
    #
    cache_mode: int,
):
    """Generate simulated random inputs for a fused attention-preprocessing op.

    NOTE(review): the wdqkv/wuq/wuk naming suggests an MLA-style (latent
    attention) kernel — confirm against the op under test.  These are mock
    tensors; replace with real data in actual use.

    Args:
        data_type: floating dtype for activations and float weights.
        tokens: number of input tokens (rows of the activation matrix).
        head_num_q: number of query heads.
        block_size: tokens per KV-cache block.
        block_num: number of KV-cache blocks.  block_num * block_size must be
            >= tokens, because slot_mapping samples slots without replacement.
        hidden_size: model hidden size (input feature dim).
        hidden_size_wdqkv: output dim of the down-projection weight wdqkv.
        hidden_size_wuq_head: per-head output dim of wuq.
        hidden_size_wuk_head: per-head output dim of wuk.
        hidden_size_wdq: query slice of wdqkv's output; the remainder is the
            compressed-KV part (the "576" in the original margin comments).
        hidden_size_rope_q_head: rotary dim per query head.
        hidden_size_rope_k: rotary dim of the key.
        epsilon: RMSNorm epsilon — logged only, not used in generation.
        transpose_wdqkv: weight layout flag for wdqkv.
        transpose_wuq: weight layout flag for wuq.
        transpose_wuk: weight layout flag for wuk.
        cache_mode: cache layout selector — logged only, not used here.

    Returns:
        A 23-tuple: (input, gamma1, beta1, quant_scale1, quant_offset1,
        wdqkv, de_scale1, bias1, gamma2, beta2, quant_scale2, quant_offset2,
        wuq, de_scale2, bias2, gamma3, cos1, sin1, cos2, sin2, wuk,
        kv_cache, slot_mapping).  cos2/sin2 are aliases of cos1/sin1.
    """
    print(f"data_type: {data_type}")
    print(f"tokens: {tokens}")
    print(f"head_num_q: {head_num_q}")
    print(f"block_size: {block_size}")
    print(f"block_num: {block_num}")
    print(f"hidden_size: {hidden_size}")
    print(f"hidden_size_wdqkv: {hidden_size_wdqkv}")
    print(f"hidden_size_wuq_head: {hidden_size_wuq_head}")
    print(f"hidden_size_wuk_head: {hidden_size_wuk_head}")

    print(f"hidden_size_wdq: {hidden_size_wdq}")
    print(f"hidden_size_rope_q_head: {hidden_size_rope_q_head}")
    print(f"hidden_size_rope_k: {hidden_size_rope_k}")
    print(f"epsilon: {epsilon}")

    print(f"transpose_wdqkv: {transpose_wdqkv}")
    print(f"transpose_wuq: {transpose_wuq}")
    print(f"transpose_wuk: {transpose_wuk}")

    print(f"cache_mode: {cache_mode}")

    # Derived sizes (the "## 576" / "## 512" notes in the original refer to
    # the DeepSeek-V2-like defaults presumably passed by the caller).
    hidden_size_wdkv = hidden_size_wdqkv - hidden_size_wdq           # KV latent + rope_k
    hidden_size_rms_kv = hidden_size_wdkv - hidden_size_rope_k       # RMSNorm'ed KV latent
    hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head
    hidden_size_output_kv = hidden_size_rope_k + hidden_size_rms_kv  # == hidden_size_wdkv

    def _uniform(shape, low=-1.0, high=1.0, dtype=data_type):
        # Random uniform tensor in [low, high), cast to `dtype`.
        return torch.from_numpy(np.random.uniform(low, high, size=shape)).to(dtype)

    def _int_bias(size):
        # Small random int32 bias row vector of shape (1, size).
        return torch.from_numpy(np.random.randint(-10, 10, (1, size)).astype(np.int32))

    # --- Stage 1: input RMSNorm + quantize + wdqkv down-projection ---
    # (renamed from `input` to avoid shadowing the builtin)
    hidden_states = _uniform((tokens, hidden_size))
    gamma1 = _uniform(hidden_size)
    # beta values in {-1, 0}: generated as float32 first, then cast down.
    beta1 = torch.from_numpy(np.random.randint(-1, 1, hidden_size).astype(np.float32)).to(data_type)
    quant_scale1 = _uniform(1, 5.0, 10.0)
    quant_offset1 = _uniform(1, -32.0, 31.0, torch.int8)
    shape_wdqkv = (hidden_size_wdqkv, hidden_size) if transpose_wdqkv else (hidden_size, hidden_size_wdqkv)
    # int8 weight: uniform floats truncated to int8 (values mostly {-1, 0, 1}).
    wdqkv = _uniform(shape_wdqkv, -2.0, 2.0, torch.int8)
    # Tiny de-quant scales keep int8-matmul outputs in a small numeric range.
    de_scale1 = torch.rand(hidden_size_wdqkv, dtype=torch.float32) / 1e5
    bias1 = _int_bias(hidden_size_wdqkv)

    # --- Stage 2: query RMSNorm + quantize + wuq up-projection ---
    gamma2 = _uniform(hidden_size_wdq)
    beta2 = torch.from_numpy(np.random.randint(-1, 1, hidden_size_wdq).astype(np.float32)).to(data_type)
    quant_scale2 = _uniform(1, 5.0, 10.0)
    quant_offset2 = _uniform(1, -32.0, 31.0, torch.int8)
    shape_wuq = (
        (head_num_q * hidden_size_wuq_head, hidden_size_wdq)
        if transpose_wuq
        else (hidden_size_wdq, head_num_q * hidden_size_wuq_head)
    )
    wuq = _uniform(shape_wuq, -2.0, 2.0, torch.int8)
    de_scale2 = torch.rand(head_num_q * hidden_size_wuq_head, dtype=torch.float32) / 1e5
    bias2 = _int_bias(head_num_q * hidden_size_wuq_head)

    # --- Stage 3: KV RMSNorm, rotary tables, wuk, paged KV cache ---
    gamma3 = _uniform(hidden_size_rms_kv)
    cos1 = _uniform((tokens, hidden_size_rope_k))
    sin1 = _uniform((tokens, hidden_size_rope_k))
    # Q and K share the same rotary tables (aliases, not copies).
    cos2 = cos1
    sin2 = sin1
    shape_wuk = (
        (head_num_q, hidden_size_wuk_head, hidden_size_nope_q_head)
        if transpose_wuk
        else (head_num_q, hidden_size_nope_q_head, hidden_size_wuk_head)
    )
    wuk = _uniform(shape_wuk)
    kv_cache = _uniform((block_num, block_size, 1, hidden_size_output_kv))
    # One distinct cache slot per token; requires block_num*block_size >= tokens.
    slot_mapping = torch.from_numpy(
        np.random.choice(block_num * block_size, tokens, replace=False).astype(np.int32)
    )

    return (hidden_states, gamma1, beta1, quant_scale1, quant_offset1, wdqkv, de_scale1, bias1,
            gamma2, beta2, quant_scale2, quant_offset2, wuq, de_scale2, bias2,
            gamma3, cos1, sin1, cos2, sin2, wuk, kv_cache, slot_mapping)