import random
import torch
import torch_npu
import numpy as np

# Column order for every row in TEST_CASES below.
TEST_CASE_HEADER = ["b", "h", "n", "d", "e", "block_size", "dtype"]

TEST_CASES = [
    #
    # FIXME: currently only works when d and e are both 64; other combinations
    # fail (the same failure also reproduces on A800). The rows below are kept
    # disabled until that is resolved.
    # First 50 groups: torch.float16
    # [1, 4, 200, 32, 64, 256, torch.float16],
    # [1, 4, 300, 64, 128, 256, torch.float16],
    # [1, 4, 400, 128, 256, 256, torch.float16],
    # [1, 4, 512, 256, 512, 256, torch.float16],
    # [1, 4, 600, 32, 128, 256, torch.float16],
    # [1, 4, 700, 64, 256, 256, torch.float16],
    # [1, 4, 800, 128, 512, 256, torch.float16],
    # [1, 4, 900, 256, 64, 256, torch.float16],
    # [1, 4, 1024, 32, 256, 256, torch.float16],
    # [1, 4, 256, 64, 512, 256, torch.float16],
    # #
    # [1, 6, 200, 128, 64, 256, torch.float16],
    # [1, 6, 300, 256, 128, 256, torch.float16],
    # [1, 6, 400, 32, 256, 256, torch.float16],
    # [1, 6, 512, 64, 512, 256, torch.float16],
    # [1, 6, 600, 128, 64, 256, torch.float16],
    # [1, 6, 700, 256, 128, 256, torch.float16],
    # [1, 6, 800, 32, 256, 256, torch.float16],
    # [1, 6, 900, 64, 512, 256, torch.float16],
    # [1, 6, 1024, 128, 64, 256, torch.float16],
    # [1, 6, 256, 256, 128, 256, torch.float16],
    # #
    # [1, 8, 200, 32, 256, 256, torch.float16],
    # [1, 8, 300, 64, 512, 256, torch.float16],
    # [1, 8, 400, 128, 64, 256, torch.float16],
    # [1, 8, 512, 256, 128, 256, torch.float16],
    # [1, 8, 600, 32, 256, 256, torch.float16],
    # [1, 8, 700, 64, 512, 256, torch.float16],
    # [1, 8, 800, 128, 64, 256, torch.float16],
    # [1, 8, 900, 256, 128, 256, torch.float16],
    # [1, 8, 1024, 32, 256, 256, torch.float16],
    # [1, 8, 256, 64, 512, 256, torch.float16],
    # #
    # [1, 12, 200, 128, 64, 256, torch.float16],
    # [1, 12, 300, 256, 128, 256, torch.float16],
    # [1, 12, 400, 32, 256, 256, torch.float16],
    # [1, 12, 512, 64, 512, 256, torch.float16],
    # [1, 12, 600, 128, 64, 256, torch.float16],
    # [1, 12, 700, 256, 128, 256, torch.float16],
    # [1, 12, 800, 32, 256, 256, torch.float16],
    # [1, 12, 900, 64, 512, 256, torch.float16],
    # [1, 12, 1024, 128, 64, 256, torch.float16],
    # [1, 12, 256, 256, 128, 256, torch.float16],
    # #
    # [2, 4, 200, 32, 512, 256, torch.float16],
    # [2, 4, 300, 64, 64, 256, torch.float16],
    # [2, 4, 400, 128, 128, 256, torch.float16],
    # [2, 4, 512, 256, 256, 256, torch.float16],
    # [2, 4, 600, 32, 512, 256, torch.float16],
    # [2, 4, 700, 64, 64, 256, torch.float16],
    # [2, 4, 800, 128, 128, 256, torch.float16],
    # [2, 4, 900, 256, 256, 256, torch.float16],
    # [2, 4, 1024, 32, 512, 256, torch.float16],
    # [2, 4, 256, 64, 64, 256, torch.float16],

    # # Last 50 groups: torch.bfloat16
    # [2, 6, 200, 128, 256, 256, torch.bfloat16],
    # [2, 6, 300, 256, 512, 256, torch.bfloat16],
    # [2, 6, 400, 32, 64, 256, torch.bfloat16],
    # [2, 6, 512, 64, 128, 256, torch.bfloat16],
    # [2, 6, 600, 128, 256, 256, torch.bfloat16],
    # [2, 6, 700, 256, 512, 256, torch.bfloat16],
    # [2, 6, 800, 32, 64, 256, torch.bfloat16],
    # [2, 6, 900, 64, 128, 256, torch.bfloat16],
    # [2, 6, 1024, 128, 256, 256, torch.bfloat16],
    # [2, 6, 256, 256, 512, 256, torch.bfloat16],
    # #
    # [2, 8, 200, 32, 128, 256, torch.bfloat16],
    # [2, 8, 300, 64, 256, 256, torch.bfloat16],
    # [2, 8, 400, 128, 512, 256, torch.bfloat16],
    # [2, 8, 512, 256, 64, 256, torch.bfloat16],
    # [2, 8, 600, 32, 128, 256, torch.bfloat16],
    # [2, 8, 700, 64, 256, 256, torch.bfloat16],
    # [2, 8, 800, 128, 512, 256, torch.bfloat16],
    # [2, 8, 900, 256, 64, 256, torch.bfloat16],
    # [2, 8, 1024, 32, 128, 256, torch.bfloat16],
    # [2, 8, 256, 64, 256, 256, torch.bfloat16],
    # #
    # [2, 12, 200, 128, 512, 256, torch.bfloat16],
    # [2, 12, 300, 256, 64, 256, torch.bfloat16],
    # [2, 12, 400, 32, 128, 256, torch.bfloat16],
    # [2, 12, 512, 64, 256, 256, torch.bfloat16],
    # [2, 12, 600, 128, 512, 256, torch.bfloat16],
    # [2, 12, 700, 256, 64, 256, torch.bfloat16],
    # [2, 12, 800, 32, 128, 256, torch.bfloat16],
    # [2, 12, 900, 64, 256, 256, torch.bfloat16],
    # [2, 12, 1024, 128, 512, 256, torch.bfloat16],
    # [2, 12, 256, 256, 64, 256, torch.bfloat16],
    # #
    # [3, 4, 200, 32, 256, 256, torch.bfloat16],
    # [3, 4, 300, 64, 512, 256, torch.bfloat16],
    # [3, 4, 400, 128, 64, 256, torch.bfloat16],
    # [3, 4, 512, 256, 128, 256, torch.bfloat16],
    # [3, 4, 600, 32, 256, 256, torch.bfloat16],
    # [3, 4, 700, 64, 512, 256, torch.bfloat16],
    # [3, 4, 800, 128, 64, 256, torch.bfloat16],
    # [3, 4, 900, 256, 128, 256, torch.bfloat16],
    # [3, 4, 1024, 32, 256, 256, torch.bfloat16],
    # [3, 4, 256, 64, 512, 256, torch.bfloat16],
    # #
    # [1, 64, 200, 128, 64, 128, torch.bfloat16],
    # [1, 64, 300, 256, 128, 128, torch.bfloat16],
    # [2, 32, 400, 32, 256, 256, torch.bfloat16],
    # [2, 32, 512, 64, 512, 256, torch.bfloat16],
    # [3, 32, 600, 128, 64, 256, torch.bfloat16],
    # [3, 32, 700, 256, 128, 128, torch.float16],
    # [3, 16, 800, 32, 256, 128, torch.float16],
    # [4, 16, 2048, 256, 512, 256, torch.float16],
    # [4, 16, 1024, 128, 64, 256, torch.float16],
    # [4, 16, 2000, 256, 128, 256, torch.float16],

    # Currently active cases (all use h == 5):
    [1, 5, 200, 128, 64, 128, torch.bfloat16],
    [1, 5, 300, 256, 128, 128, torch.bfloat16],
    [2, 5, 400, 32, 256, 256, torch.bfloat16],
    [2, 5, 512, 64, 512, 256, torch.bfloat16],
    [3, 5, 600, 128, 64, 256, torch.bfloat16],
    [3, 5, 700, 256, 128, 128, torch.float16],
    [3, 5, 800, 32, 256, 128, torch.float16],
    [4, 5, 2048, 256, 512, 256, torch.float16],
    [4, 5, 1024, 128, 64, 256, torch.float16],
    [4, 5, 2000, 256, 128, 256, torch.float16],
]

def gen_input(
    b, h, n, d, e, block_size, dtype
):
    """Generate random inputs on the NPU for one test case.

    Args:
        b: batch size.
        h: number of attention heads.
        n: sequence length.
        d: head dimension of q/k.
        e: head dimension of v (and of the kv-history output dim).
        block_size: kernel block size; passed through unchanged.
        dtype: torch dtype for all generated tensors.

    Returns:
        ``[q, k, v, ed, kv_history, block_size]`` where the tensors have been
        moved to the NPU. Shapes: q/k are (b, h, n, d), v is (b, h, n, e),
        ed is (h,), kv_history is (b, h, d, e).
    """
    # Values in [0, 0.125) keep fp16/bf16 accumulations well away from overflow.
    q = torch.from_numpy(np.random.uniform(0.0, 0.125, size=(b, h, n, d))).to(dtype)
    k = torch.from_numpy(np.random.uniform(0.0, 0.125, size=(b, h, n, d))).to(dtype)
    v = torch.from_numpy(np.random.uniform(0.0, 0.125, size=(b, h, n, e))).to(dtype)

    # Per-head decay factors ed[hi] = 0.1 * (hi + 1). Computed vectorized in
    # float64 and then cast, which rounds identically to the original
    # element-by-element Python loop assignment.
    ed = (0.1 * torch.arange(1, h + 1, dtype=torch.float64)).to(dtype)

    kv_history = torch.from_numpy(np.random.uniform(0.0, 0.125, size=(b, h, d, e))).to(dtype)

    return [item.npu() for item in [q, k, v, ed, kv_history]] + [block_size]