import triton
import triton.language as tl
import test_common
import pytest
import torch
import torch_npu
import numpy as np
import random
import os
import subprocess
import argparse
from softmax import softmax

# Default [torch_dtype, dtype_name, shape] parametrizations used when the
# TEST_CASES environment variable is empty/unset.
#
# NOTE(review): the float16 and float32 grids appear twice and the final
# float32 section stops at (128, 5120).  This reproduces the original list
# exactly — confirm the repetition/truncation is intentional.
_DEFAULT_ROWS = (1, 128, 256, 384, 512)
_DEFAULT_COLS = (1024, 2048, 4096, 5120, 6656, 8192)
_DEFAULT_SHAPES = [(r, c) for r in _DEFAULT_ROWS for c in _DEFAULT_COLS]

DEFAULT_TEST_CASES = (
    [[torch.float16, "float16", s] for s in _DEFAULT_SHAPES]
    + [[torch.float32, "float32", s] for s in _DEFAULT_SHAPES]
    + [[torch.float16, "float16", s] for s in _DEFAULT_SHAPES]
    + [[torch.float32, "float32", s] for s in _DEFAULT_SHAPES[:10]]
)

# Default RNG seed used for reproducible tensor generation.
SEED = 42

# The code below (softmax_torch, parse_test_cases, etc.) continues unchanged...

def softmax_torch(x):
    """Numerically stable reference softmax over the last dimension of *x*."""
    # Subtract the per-row max before exponentiating to avoid overflow.
    shifted = x - x.max(dim=-1, keepdim=True)[0]
    exp = torch.exp(shifted)
    return exp / exp.sum(dim=-1, keepdim=True)

def parse_test_cases(input_str):
    """Parse a TEST_CASES spec string into a list of test cases.

    The spec is comma-separated cases, each of the form
    ``"dtype dim1 [dim2 ...]"`` — e.g. ``"float16 128 1024, float32 1 8192"``.
    Variable-rank shapes are supported (one or more dimensions).

    Args:
        input_str: Raw spec string; a falsy value selects DEFAULT_TEST_CASES.

    Returns:
        A list of ``[torch_dtype, dtype_name, shape_tuple]`` entries.

    Raises:
        ValueError: On an unknown dtype name, a non-integer dimension, or a
            case with fewer than two tokens.
    """
    if not input_str:
        return DEFAULT_TEST_CASES
    cases = []
    dtype_map = {
        "float32": torch.float32,
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "float64": torch.float64,
        "int8": torch.int8,
        "int16": torch.int16,
        "int32": torch.int32,
        "int64": torch.int64,
        "uint8": torch.uint8,
        "bool": torch.bool,
    }
    for case_str in input_str.split(','):
        parts = case_str.strip().split()
        if not parts:
            # Tolerate stray/trailing commas instead of aborting the run.
            continue
        if len(parts) < 2:  # at least dtype + one dimension
            raise ValueError(f"Invalid case (at least dtype + one dim): {case_str}")
        dtype_str = parts[0]
        dtype = dtype_map.get(dtype_str)
        if dtype is None:
            raise ValueError(f"Unknown dtype: {dtype_str}")
        try:
            shape = tuple(map(int, parts[1:]))
        except ValueError as exc:
            # Chain the cause so the offending token is visible in tracebacks.
            raise ValueError(f"Invalid shape dimensions in: {case_str}") from exc
        cases.append([dtype, dtype_str, shape])
    return cases

def pytest_generate_tests(metafunc):
    """Parametrize ``test_case`` from the TEST_CASES env var (or defaults)."""
    if "test_case" not in metafunc.fixturenames:
        return
    raw_spec = os.environ.get('TEST_CASES', '')
    try:
        cases = parse_test_cases(raw_spec)
    except ValueError as err:
        # Fall back to the built-in grid rather than failing collection.
        print(f"Error parsing TEST_CASES: {err}. Using default cases.")
        cases = DEFAULT_TEST_CASES
    metafunc.parametrize("test_case", cases)

def test_softmax(test_case):
    """Run the Triton softmax kernel on an NPU tensor for one test case.

    Args:
        test_case: ``[torch_dtype, dtype_name, shape]`` triple, as produced by
            parse_test_cases / DEFAULT_TEST_CASES.

    Returns:
        The kernel output tensor.  The return value is consumed by the
        profiling driver in ``__main__``; pytest itself ignores it.

    NOTE(review): the numeric comparison against softmax_torch is currently
    disabled, so this only checks that the kernel launches and completes —
    re-enable the validation below for correctness runs.
    """
    dtype, sigtype, shape = test_case
    x = test_common.generate_tensor(shape, sigtype).npu()
    y_triton = softmax(x)
    return y_triton
    # To validate numerics against the torch reference, re-enable:
    # y_torch = softmax_torch(x)
    # test_common.validate_cmp(sigtype, y_triton, y_torch)

if __name__ == "__main__":
    # Parse command-line arguments; --dims accepts a variable number of
    # shape dimensions.
    parser = argparse.ArgumentParser(description="Run pytest with custom test case")
    parser.add_argument("--dtype", type=str, default=None, help="Data type for the test (e.g., float32)")
    parser.add_argument("--dims", nargs="*", type=int, default=None, help="Dimensions for shape, e.g. --dims 1123 1012")
    parser.add_argument("--seed", type=int, default=SEED, help="Random seed")
    parser.add_argument("--device", type=int, default=0, help="NPU device ID")
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.npu.set_device(args.device)

    # Build the single test case to profile.  Previously, omitting
    # --dtype/--dims crashed on tuple(None) / dtype_map[None] even though the
    # TEST_CASES='' branch intended "use defaults"; now fall back to the first
    # default case instead.
    if args.dtype and args.dims:
        os.environ['TEST_CASES'] = f"{args.dtype} {' '.join(map(str, args.dims))}"
        # parse_test_cases validates the dtype name and dims for us,
        # replacing the duplicated dtype_map this block used to carry.
        test_case = parse_test_cases(os.environ['TEST_CASES'])[0]
    else:
        os.environ['TEST_CASES'] = ''  # empty spec -> DEFAULT_TEST_CASES
        test_case = DEFAULT_TEST_CASES[0]

    # Profile LOOP kernel launches with the torch_npu profiler; traces go
    # under ./prof/ in tensorboard format.
    stream = torch.npu.current_stream()
    experimental_config = torch_npu.profiler._ExperimentalConfig(
        aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
        profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
        l2_cache=True,
        data_simplification=False
    )
    triton_path = "./prof/"
    os.makedirs(triton_path, exist_ok=True)
    LOOP = 20
    with torch_npu.profiler.profile(
        activities=[
            torch_npu.profiler.ProfilerActivity.NPU
        ],
        schedule=torch_npu.profiler.schedule(wait=0, warmup=1, active=LOOP, repeat=1, skip_first=1),
        on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(triton_path),
        record_shapes=False,
        profile_memory=False,
        with_stack=False,
        with_flops=False,
        with_modules=False,
        experimental_config=experimental_config) as prof:

        stream.synchronize()
        prof.step()
        # LOOP + 2 iterations cover the schedule's skip_first=1 and warmup=1
        # steps plus the LOOP profiled (active) steps.
        for i in range(LOOP + 2):
            y_triton = test_softmax(test_case)
            prof.step()
            if i == 0:
                # Synchronize once after the first launch so one-time compile
                # cost is not attributed to the profiled steps.
                stream.synchronize()
        stream.synchronize()
# Example: python test_softmax.py --dtype float32 --dims 1023 123 --seed 42 --device 7
