# import triton
# import triton.language as tl
# import test_common
# import pytest
# import torch
# import torch_npu
# import numpy as np
# import random
# import os
# import subprocess
# import argparse
# from rmsnorm import rmsnorm

# # 默认 TEST_CASES，如果没有自定义输入
# DEFAULT_TEST_CASES = [
#     # tokenNum, blockSize, cacheMode, hiddenSize, dtype, headNum, blockNum, hs_wdqk_head, hs_wuq_head, hs_wuk_head, hs_wdq, hs_rope_q_head, hs_rope_k
#     [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
# ]

# SEED = 42

# def softmax_torch(x):
#     x_max = x.max(dim=-1, keepdim=True)[0]
#     z = x - x_max
#     numerator = torch.exp(z)
#     denominator = numerator.sum(dim=-1, keepdim=True)
#     ret = numerator / denominator
#     return ret

# def parse_test_cases(input_str):
#     """解析输入字符串成 TEST_CASES 列表，支持 variable shape 维度"""
#     if not input_str:
#         return DEFAULT_TEST_CASES
#     cases = []
#     dtype_map = {
#         "float32": torch.float32,
#         "float16": torch.float16,
#         "bfloat16": torch.bfloat16,
#         "float64": torch.float64,
#         "int8": torch.int8,
#         "int16": torch.int16,
#         "int32": torch.int32,
#         "int64": torch.int64,
#         "uint8": torch.uint8,
#         "bool": torch.bool,
#     }
#     for case_str in input_str.split(','):
#         parts = case_str.strip().split()
#         if len(parts) < 2:  # 至少 dtype + 一个维度
#             raise ValueError(f"Invalid case (at least dtype + one dim): {case_str}")
#         dtype_str = parts[0]
#         dtype = dtype_map.get(dtype_str)
#         if dtype is None:
#             raise ValueError(f"Unknown dtype: {dtype_str}")
#         try:
#             shape = tuple(map(int, parts[1:]))
#         except ValueError:
#             raise ValueError(f"Invalid shape dimensions in: {case_str}")
#         cases.append([dtype, dtype_str, shape])
#     return cases

# def pytest_generate_tests(metafunc):
#     if "test_case" in metafunc.fixturenames:
#         test_cases_str = os.environ.get('TEST_CASES', '')
#         try:
#             TEST_CASES = parse_test_cases(test_cases_str)
#         except ValueError as e:
#             print(f"Error parsing TEST_CASES: {e}. Using default cases.")
#             TEST_CASES = DEFAULT_TEST_CASES
#         metafunc.parametrize("test_case", TEST_CASES)


# def test_rmsnorm(test_case):
#     torch.manual_seed(SEED)
#     np.random.seed(SEED)
#     random.seed(SEED)
#     torch.npu.set_device(6)
#     dtype, sigtype, shape = test_case
#     x = test_common.generate_tensor(shape, sigtype).npu()

#     stream = torch.npu.current_stream()
#     experimental_config = torch_npu.profiler._ExperimentalConfig(
#         aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
#         profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
#         l2_cache=True ,
#         data_simplification=False
#     )
#     # test_str = "x".join(map(str, (args.dtype, *shape, args.seed)))
#     # triton_path = "./prof/" + test_str + "/"
#     triton_path = "./prof/"
#     os.makedirs(triton_path, exist_ok=True)
#     LOOP = 20
#     with torch_npu.profiler.profile(
#         activities=[
#             torch_npu.profiler.ProfilerActivity.NPU
#         ],
#         schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
#         on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
#         record_shapes=False,
#         profile_memory=False,
#         with_stack=False,
#         with_flops=False,
#         with_modules=False,
#         experimental_config=experimental_config) as prof:

#         stream.synchronize()
#         prof.step()
#         for i in range(LOOP + 2):
#             # tri_out = attention(q, k, v, causal, sm_scale,BM,BN).half()
#             # command = f'msprof op --output=./prof --launch-count=5 --warm-up=5 --kernel-name=softmax_triton pytest {__file__} -v'
#             # result = subprocess.run(command, shell=True)
#             y_triton = test_rmsnorm(test_case)
#             prof.step()
#             if i == 0:
#                 stream.synchronize()
#         stream.synchronize()
#     return y_triton
#     # y_torch = softmax_torch(x)
#     # test_common.validate_cmp(sigtype, y_triton, y_torch)

# if __name__ == "__main__":
#     # 使用 argparse 解析命令行参数，支持 variable dims
#     parser = argparse.ArgumentParser(description="Run pytest with custom test case")
#     parser.add_argument("--dtype", type=str, default=None, help="Data type for the test (e.g., float32)")
#     parser.add_argument("--dims", nargs="*", type=int, default=None, help="Dimensions for shape, e.g. --dims 1123 1012")
#     parser.add_argument("--seed", type=int, default=SEED, help="Random seed")
#     parser.add_argument("--device", type=int, default=0, help="NPU device ID")
#     args = parser.parse_args()
#     torch.manual_seed(args.seed)
#     np.random.seed(args.seed)
#     random.seed(args.seed)
#     torch.npu.set_device(args.device)
#     # 如果提供了 dtype 和 dims，则生成单个测试用例字符串；否则使用默认
#     # if args.dtype and args.dims:
#     #     os.environ['TEST_CASES'] = f"{args.dtype} {' '.join(map(str, args.dims))}"
#     # else:
#     #     os.environ['TEST_CASES'] = ''  # 使用默认
#     dtype_map = {
#         "float32": torch.float32,
#         "float16": torch.float16,
#         "bfloat16": torch.bfloat16,
#         "float64": torch.float64,
#         "int8": torch.int8,
#         "int16": torch.int16,
#         "int32": torch.int32,
#         "int64": torch.int64,
#         "uint8": torch.uint8,
#         "bool": torch.bool,
#     }
#     shape = tuple(args.dims)
#     test_case = [dtype_map[args.dtype], args.dtype, shape]
#     # 获取shape信息
    
#     # 使用 subprocess 运行 pytest，并添加 msprof 命令
#     # command = f'msprof op --output=./prof --launch-count=5 --warm-up=5 --kernel-name=softmax_triton pytest {__file__} -v'
#     # result = subprocess.run(command, shell=True)
#     stream = torch.npu.current_stream()
#     experimental_config = torch_npu.profiler._ExperimentalConfig(
#         aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
#         profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
#         l2_cache=True ,
#         data_simplification=False
#     )
#     # test_str = "x".join(map(str, (args.dtype, *shape, args.seed)))
#     # triton_path = "./prof/" + test_str + "/"
#     triton_path = "./prof/"
#     os.makedirs(triton_path, exist_ok=True)
#     LOOP = 20
#     with torch_npu.profiler.profile(
#         activities=[
#             torch_npu.profiler.ProfilerActivity.NPU
#         ],
#         schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
#         on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
#         record_shapes=False,
#         profile_memory=False,
#         with_stack=False,
#         with_flops=False,
#         with_modules=False,
#         experimental_config=experimental_config) as prof:

#         stream.synchronize()
#         prof.step()
#         for i in range(LOOP + 2):
#             # tri_out = attention(q, k, v, causal, sm_scale,BM,BN).half()
#             # command = f'msprof op --output=./prof --launch-count=5 --warm-up=5 --kernel-name=softmax_triton pytest {__file__} -v'
#             # result = subprocess.run(command, shell=True)
#             y_triton = test_rmsnorm(test_case)
#             prof.step()
#             if i == 0:
#                 stream.synchronize()
#         stream.synchronize()
# # python test_rmsnorm.py --dtype float32 --dims 1023 123 --seed 42 --device 7


import triton
import triton.language as tl
import pytest
import torch
import torch_npu
import numpy as np
import random
import os
import subprocess
import argparse
from mlapo import mlapo_triton

# 默认 TEST_CASES，按要求填入生成的数据
DEFAULT_TEST_CASES = [
    # [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    [1024, 256, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 256, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 128, 0, 7168, "bfloat16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 128, 0, 7168, "float16", 128, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [512, 128, 0, 7168, "bfloat16", 16, 192, 2112, 192, 512, 1536, 64, 64],
    # [128, 256, 0, 7168, "float16", 32, 192, 2112, 192, 512, 1536, 64, 64],
    # [1024, 256, 0, 7168, "float16", 64, 192, 2112, 192, 512, 1536, 64, 64],
    # [256, 256, 0, 7168, "float16", 16, 192, 2112, 192, 512, 1536, 64, 64]
]

SEED = 42  # fixed seed so torch / numpy / random streams are reproducible across runs

def softmax_torch(x):
    """Numerically stable softmax over the last dimension.

    Subtracts the row-wise max before exponentiating so large inputs do
    not overflow. Pure-torch reference implementation (currently unused
    by the active test, kept from the original template).
    """
    shifted = x - x.max(dim=-1, keepdim=True)[0]
    exps = torch.exp(shifted)
    return exps / exps.sum(dim=-1, keepdim=True)

def parse_test_cases():
    """Return the list of test cases to parametrize over.

    Currently just hands back DEFAULT_TEST_CASES; extend here if custom
    case sources (env vars, CLI) are needed again.
    """
    cases = DEFAULT_TEST_CASES
    return cases

def pytest_generate_tests(metafunc):
    """Pytest hook: parametrize any test that requests the ``test_case`` fixture."""
    if "test_case" not in metafunc.fixturenames:
        return
    metafunc.parametrize("test_case", parse_test_cases())

def test_rmsnorm(test_case):
    """Profile the fused ``mlapo_triton`` kernel on an Ascend NPU.

    Builds randomized inputs for one parametrized case, then runs
    ``mlapo_triton`` LOOP times under the torch_npu profiler, writing
    traces to ``./prof/<case-string>/``. This is a timing-only run:
    no numerical validation of the output is performed.

    NOTE(review): the name says "rmsnorm" but the body exercises
    ``mlapo_triton`` (imported from ``mlapo``) — presumably a leftover
    name from the template this file was adapted from; confirm and rename.
    """
    # Unpack the 13-element case row (see the column legend on
    # DEFAULT_TEST_CASES for the layout).
    (token_num, block_size, cache_mode, hidden_size, dtype_str, head_num, 
     block_num, hidden_size_wdqkv, hidden_size_wuq_head, hidden_size_wuk_head, 
     hidden_size_wdq, hidden_size_rope_q_head, hidden_size_rope_k) = test_case
    
    # Map the dtype string onto the corresponding torch dtype.
    dtype_map = {
        "float16": torch.float16,
        "bfloat16": torch.bfloat16
    }
    dtype = dtype_map[dtype_str]
    
    # Seed every RNG in play so generated tensors are reproducible.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    torch.npu.set_device(6)  # NOTE(review): device ID hard-coded to 6; the old __main__ path took a --device arg — consider making this configurable
    
    # Derived sizes (kept consistent with the mlapo_test logic).
    hidden_size_wdkv = hidden_size_wdqkv - hidden_size_wdq  # 576 for the default case
    hidden_size_rms_kv = hidden_size_wdkv - hidden_size_rope_k  # 512 for the default case
    hidden_size_nope_q_head = hidden_size_wuq_head - hidden_size_rope_q_head
    
    # Generate the core input tensors (aligned with the operator layout).
    # 1. Main activation input, (token_num, hidden_size).
    input_tensor = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=(token_num, hidden_size))
    ).to(dtype).npu()
    
    # 2. First RMSNorm parameters (scale/shift + quantization scale/offset).
    gamma1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size))
    ).to(dtype).npu()
    beta1 = torch.from_numpy(
        np.random.randint(-2, 2, (hidden_size)).astype(np.float32)
    ).to(dtype).npu()
    quant_scale1 = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=(1))
    ).to(dtype).npu()
    quant_offset1 = torch.from_numpy(
        np.random.uniform(-128.0, 127.0, size=(1))
    ).to(torch.int8).npu()
    
    # 3. Quantized projection weight plus its de-quant scale and bias.
    transpose_wdqkv = True  # fixed configuration, matching mlapo_test
    shape_wdqkv = (hidden_size_wdqkv, hidden_size) if transpose_wdqkv else (hidden_size, hidden_size_wdqkv)
    wdqkv = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=shape_wdqkv)
    ).to(torch.int8).npu()
    de_scale1 = torch.rand((hidden_size_wdqkv), dtype=torch.float32) / 100  # de-quant scale; NOTE(review): never moved with .npu() unlike every other tensor — confirm mlapo_triton accepts a CPU tensor here
    bias1 = torch.from_numpy(
        np.random.randint(20, 40, (1, hidden_size_wdqkv)).astype(np.int32)
    ).to(torch.int32).npu()
    
    # 4. Second-stage RMSNorm parameters.
    gamma2 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size_wdq))
    ).to(dtype).npu()
    beta2 = torch.from_numpy(
        np.random.randint(-2, 2, (hidden_size_wdq)).astype(np.float32)
    ).to(dtype).npu()
    quant_scale2 = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=(1))
    ).to(dtype).npu()
    quant_offset2 = torch.from_numpy(
        np.random.uniform(-128.0, 127.0, size=(1))
    ).to(torch.int8).npu()
    
    # 5. Attention-head projection weight.
    transpose_wuq = True  # fixed configuration
    shape_wuq = (head_num * hidden_size_wuq_head, hidden_size_wdq) if transpose_wuq else (hidden_size_wdq, head_num * hidden_size_wuq_head)
    wuq = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=shape_wuq)
    ).to(torch.int8).npu()
    de_scale2 = torch.rand((head_num * hidden_size_wuq_head), dtype=torch.float32) / 100  # NOTE(review): same as de_scale1 — stays on CPU; verify intent
    bias2 = torch.from_numpy(
        np.random.randint(20, 40, (1, head_num * hidden_size_wuq_head)).astype(np.int32)
    ).to(torch.int32).npu()
    
    # 6. RoPE-related parameters (cos/sin tables per token).
    gamma3 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(hidden_size_rms_kv))
    ).to(dtype).npu()
    cos1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(token_num, hidden_size_rope_k))
    ).to(dtype).npu()
    sin1 = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(token_num, hidden_size_rope_k))
    ).to(dtype).npu()
    cos2 = cos1  # reuse cos1 (same table for both rotations)
    sin2 = sin1  # reuse sin1
    
    # 7. KV-cache and slot-mapping parameters.
    transpose_wuk = True  # fixed configuration
    shape_wuk = (head_num, hidden_size_nope_q_head, hidden_size_wuk_head) if not transpose_wuk else (head_num, hidden_size_wuk_head, hidden_size_nope_q_head)
    wuk = torch.from_numpy(
        np.random.uniform(-2.0, 2.0, size=shape_wuk)
    ).to(dtype).npu()
    key_cache = torch.from_numpy(
        np.random.uniform(-1.0, 1.0, size=(block_num, block_size, 1, hidden_size_wdkv))
    ).to(dtype).npu()
    slot_mapping = torch.from_numpy(
        np.random.choice(block_num * block_size, token_num, replace=False).astype(np.int32)
    ).to(torch.int32).npu()  # replace=False => every token gets a distinct cache slot
    
    # Fixed epsilon for the RMSNorm stages.
    epsilon = 1e-6
    
    stream = torch.npu.current_stream()
    experimental_config = torch_npu.profiler._ExperimentalConfig(
        aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
        profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
        l2_cache=True,
        data_simplification=False
    )
    # Per-case trace directory, e.g. ./prof/bfloat16x1024x256x32x42/
    test_str = "x".join(map(str, (dtype_str, token_num, block_size, head_num, SEED)))
    triton_path = "./prof/" + test_str + "/"
    os.makedirs(triton_path, exist_ok=True)
    LOOP = 20  # number of "active" profiled iterations
    
    # Schedule: 1 skipped + 1 warmup + LOOP active steps, hence the
    # LOOP + 2 iterations in the loop below.
    with torch_npu.profiler.profile(
        activities=[torch_npu.profiler.ProfilerActivity.NPU],
        schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
        record_shapes=False,
        profile_memory=False,
        with_stack=False,
        with_flops=False,
        with_modules=False,
        experimental_config=experimental_config
    ) as prof:
        stream.synchronize()
        prof.step()
        for i in range(LOOP + 2):
            # Result is discarded — this is a timing-only run.
            y_triton = mlapo_triton(
                input_tensor, gamma1, beta1, quant_scale1, quant_offset1,
                wdqkv, de_scale1, bias1, gamma2, beta2, quant_scale2,
                quant_offset2, wuq, de_scale2, bias2, gamma3, cos1, sin1,
                cos2, sin2, wuk, key_cache, slot_mapping,
                hidden_size_wdq=hidden_size_wdq,
                hidden_size_rope_q_head=hidden_size_rope_q_head,
                hidden_size_rope_k=hidden_size_rope_k,
                epsilon=epsilon,
                transpose_wdqkv=transpose_wdqkv,
                transpose_wuq=transpose_wuq,
                transpose_wuk=transpose_wuk,
                cache_mode=cache_mode
            )
            prof.step()
            if i == 0:
                # Sync once after the first launch so compilation/setup cost
                # does not bleed into later profiled steps.
                stream.synchronize()
        stream.synchronize()